diff --git a/.gitignore b/.gitignore index 84610780c..f21e3d69a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ build .idea +.DS_Store diff --git a/BFS/1245.Tree-Diameter/1245.Tree-Diameter.cpp b/BFS/1245.Tree-Diameter/1245.Tree-Diameter.cpp index 579951bb5..8b5f7fc83 100644 --- a/BFS/1245.Tree-Diameter/1245.Tree-Diameter.cpp +++ b/BFS/1245.Tree-Diameter/1245.Tree-Diameter.cpp @@ -1,25 +1,23 @@ class Solution { - vector>adj; - int V; public: int treeDiameter(vector>& edges) { - V = edges.size()+1; - adj.resize(V); - for (auto edge:edges) + int n = edges.size()+1; + vector>next(n); + for (auto edge: edges) { - adj[edge[0]].push_back(edge[1]); - adj[edge[1]].push_back(edge[0]); + next[edge[0]].push_back(edge[1]); + next[edge[1]].push_back(edge[0]); } - - auto t1 = bfs(0); - auto t2 = bfs(t1.first); + auto t1 = bfs(next, 0); + auto t2 = bfs(next, t1.first); return t2.second; } - - pair bfs(int u) + + pair bfs(vector>&next, int u) { - vectordis(V, -1); + int n = next.size(); + vectordis(n, -1); queue q; q.push(u); @@ -30,7 +28,7 @@ class Solution { int t = q.front(); q.pop(); - for (auto it = adj[t].begin(); it != adj[t].end(); it++) + for (auto it = next[t].begin(); it != next[t].end(); it++) { int v = *it; if (dis[v] == -1) @@ -42,9 +40,9 @@ class Solution { } int maxDis = 0; - int nodeIdx; + int nodeIdx = 0; - for (int i = 0; i < V; i++) + for (int i = 0; i < n; i++) { if (dis[i] > maxDis) { @@ -53,5 +51,5 @@ class Solution { } } return make_pair(nodeIdx, maxDis); - } + } }; diff --git a/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/1568.Minimum-Number-of-Days-to-Disconnect-Island.cpp b/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/1568.Minimum-Number-of-Days-to-Disconnect-Island.cpp index 2346de173..26695a44b 100644 --- a/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/1568.Minimum-Number-of-Days-to-Disconnect-Island.cpp +++ b/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/1568.Minimum-Number-of-Days-to-Disconnect-Island.cpp @@ -6,26 +6,25 @@ class Solution { m = grid.size(); n = grid[0].size(); - int count = islands(grid); - if (count > 1) return 0; + int count = island(grid); + if (count > 1 || count == 0) return 0; for (int i=0; i 1) return 1; + int count = island(grid); + if (count>1 || count==0) return 1; grid[i][j] = 1; } - return 2; } - int islands(vector>& grid) + int island(vector>& grid) { - auto dir = vector>({{1,0},{-1,0},{0,1},{0,-1}}); auto visited = vector>(m, vector(n,0)); + auto dir = vector>({{1,0},{-1,0},{0,1},{0,-1}}); int count = 0; for (int i=0; i>q; q.push({i,j}); visited[i][j] = 1; - - count++; while (!q.empty()) { @@ -50,14 +47,16 @@ class Solution { int a = x+dir[k].first; int b = y+dir[k].second; if (a<0||a>=m||b<0||b>=n) continue; - if (grid[a][b]==0) continue; if (visited[a][b]==1) continue; + if (grid[a][b]==0) continue; + q.push({a,b}); - visited[a][b] = 1; + visited[a][b]=1; } } - if (count == 2) return 2; + count+=1; + if (count>1) return 2; } return count; } diff --git a/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/Readme.md b/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/Readme.md index b89ff211a..911bb9928 100644 --- a/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/Readme.md +++ b/BFS/1568.Minimum-Number-of-Days-to-Disconnect-Island/Readme.md @@ -8,6 +8,8 @@ 再看一下例子,发现sample里的答案没有超过2的。细想一下,确实无论什么几何形状,都会有“角落”的格子。所谓的“角落”,指的是只有两个邻接点是与大陆相连。所以答案又可以缩小为不会超过2. 
-如今答案的可能只有0,1,2。答案是否为零很好判断,扫一遍全局看是否只有一个岛。答案是否为1呢?如果只要删除一块陆地就能使得岛的数量大于等于2,那么我们就遍历每一块陆地,假设删除它,再查看一下剩下的地形的岛的数量即可。遍历一块“删除之地”大概是o(900),然后每次搜索全局数一下剩余岛的个数也是o(900),总共的时间复杂度是o(810000),是可以接受的。如果答案不为1,那么答案就肯定是2了。 +如今答案的可能只有0,1,2。答案是否为零很好判断,扫一遍全局看是否已经存在多于一个岛,或者压根没有岛。 + +答案是否为1呢?这意味着只要删除一块陆地,就能使得岛的数量大于等于2,或者没有任何岛。于是我们可以遍历每一块陆地,假设删除它,再查看一下剩下的地形的岛的数量即可。遍历一块“删除之地”大概是o(900),然后每次搜索全局数一下剩余岛的个数也是o(900),总共的时间复杂度是o(810000),是可以接受的。如果答案不为1,那么答案就肯定是2了。 所以本题的本质就是一个暴力枚举+BFS搜索判断岛数量。 diff --git a/BFS/2093.Minimum-Cost-to-Reach-City-With-Discounts/2093.Minimum-Cost-to-Reach-City-With-Discounts.cpp b/BFS/2093.Minimum-Cost-to-Reach-City-With-Discounts/2093.Minimum-Cost-to-Reach-City-With-Discounts.cpp index e09121a60..21df69f74 100644 --- a/BFS/2093.Minimum-Cost-to-Reach-City-With-Discounts/2093.Minimum-Cost-to-Reach-City-With-Discounts.cpp +++ b/BFS/2093.Minimum-Cost-to-Reach-City-With-Discounts/2093.Minimum-Cost-to-Reach-City-With-Discounts.cpp @@ -1,41 +1,39 @@ -using PII = pair; using AI3 = array; class Solution { public: int minimumCost(int n, vector>& highways, int discounts) { - vector>next(n); + vector>>next(n); for (auto x: highways) { - next[x[0]].push_back({x[1], x[2]}); - next[x[1]].push_back({x[0], x[2]}); + int a = x[0], b = x[1], w = x[2]; + next[a].push_back({b, w}); + next[b].push_back({a, w}); } - - vector>cost(n, vector(discounts+1, INT_MAX)); - priority_queue, greater<>>pq; - pq.push({0,0,discounts}); // {cost, node, discounts} - + vector>cost(n, vector(discounts+1, INT_MAX)); // cost[city][discounts] + priority_queue, greater<>> pq; // {cost, city, discounts} + pq.push({0,0,discounts}); + while (!pq.empty()) { - auto [c, curNode, times] = pq.top(); + auto [c, cur, times] = pq.top(); pq.pop(); - if (c >= cost[curNode][times]) continue; - cost[curNode][times] = c; - if (curNode==n-1) return c; + if (c >= cost[cur][times]) continue; + cost[cur][times] = c; + if (cur == n-1) return c; - for (auto nxt: next[curNode]) + for (auto x: next[cur]) { - auto [nxtNode, len] = nxt; - if (cost[nxtNode][times]!=INT_MAX) continue; - pq.push({c+len, nxtNode, times}); - if (times>=1) - pq.push({c+len/2, nxtNode, times-1}); - } + auto [nxt, toll] = x; + if (cost[nxt][times]==INT_MAX) + pq.push({c+toll, nxt, times}); + if (times >= 1 && cost[nxt][times-1]==INT_MAX) + pq.push({c+toll/2, nxt, times-1}); + } } - return -1; - + return -1; } }; diff --git a/BFS/210.Course-Schedule-II/210.Course-Schedule-II.cpp b/BFS/210.Course-Schedule-II/210.Course-Schedule-II.cpp index 70e244298..6c49299f9 100644 --- a/BFS/210.Course-Schedule-II/210.Course-Schedule-II.cpp +++ b/BFS/210.Course-Schedule-II/210.Course-Schedule-II.cpp @@ -3,8 +3,8 @@ class Solution { vector findOrder(int numCourses, vector>& prerequisites) { int n = numCourses; - unordered_map>nextCourses(n); - unordered_mapdegree(n); + vector>nextCourses(n); + vectordegree(n, 0); for (auto edge: prerequisites) { diff --git a/BFS/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph.cpp b/BFS/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph.cpp new file mode 100644 index 000000000..bbad94f68 --- /dev/null +++ b/BFS/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph.cpp @@ -0,0 +1,48 @@ +class Solution { +public: + vector> getAncestors(int n, vector>& edges) + { + vector>next(n); + vectorindegree(n); + for (auto edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + 
indegree[b]++; + } + + queueq; + for (int i=0; i>rets(n); + while (!q.empty()) + { + int cur = q.front(); + q.pop(); + for (auto x: next[cur]) + { + for (auto y: rets[cur]) + rets[x].insert(y); + rets[x].insert(cur); + indegree[x]--; + if (indegree[x]==0) + q.push(x); + } + } + + vector>ans(n); + for (int i=0; i distanceToCycle(int n, vector>& edges) + { + vectordegree(n); + vector>next(n); + for (auto edge: edges) + { + int a = edge[0], b = edge[1]; + degree[a]++; + degree[b]++; + next[a].push_back(b); + next[b].push_back(a); + } + + vectorrets(n); + queueq; + for (int i=0; i>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + + vector>person = bfs(grid, 0); + vector>fire = bfs(grid, 1); + + if (person[m-1][n-1]==INT_MAX || person[m-1][n-1] > fire[m-1][n-1]) + return -1; + if (fire[m-1][n-1]==INT_MAX) + return 1e9; + + int D = fire[m-1][n-1]-person[m-1][n-1]; + + if (fire[m-1][n-2] == fire[m-2][n-1]) return D-1; + if (fire[m-1][n-2] < fire[m-2][n-1]) + { + if (person[m-2][n-1] == person[m-1][n-1]-1) + return D; + } + else + { + if (person[m-1][n-2] == person[m-1][n-1]-1) + return D; + } + + return D-1; + } + + vector>bfs(vector>&grid, int type) + { + int m = grid.size(); + int n = grid[0].size(); + vector>rets(m, vector(n,INT_MAX)); + queue>q; + + if (type==0) + { + q.push({0,0}); + rets[0][0] = 0; + } + else + { + for (int i=0; i>dir({{1,0},{-1,0},{0,1},{0,-1}}); + int step = 0; + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + auto [x,y] = q.front(); + q.pop(); + + for (auto& [dx, dy] : dir) + { + int i = x+dx, j = y+dy; + if (i<0||i>=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (rets[i][j]!=INT_MAX) continue; + rets[i][j] = step+1; + if (i!=m-1 || j!=n-1) + q.push({i,j}); + } + } + step++; + } + + return rets; + } +}; diff --git a/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v2.cpp b/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v2.cpp new file mode 100644 index 000000000..aa967a04d --- /dev/null +++ b/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v2.cpp @@ -0,0 +1,106 @@ +class Solution { + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; +public: + int maximumMinutes(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + + vector>person = bfs(grid, 0); + vector>fire = bfs(grid, 1); + + if (person[m-1][n-1]==INT_MAX || person[m-1][n-1] > fire[m-1][n-1]) + return -1; + if (fire[m-1][n-1]==INT_MAX) + return 1e9; + + int D = fire[m-1][n-1]-person[m-1][n-1]; + if (checkOK(grid, fire, D)) + return D; + else + return D-1; + } + + bool checkOK(vector>& grid, vector>fire, int D) + { + int m = grid.size(); + int n = grid[0].size(); + vector>visited(m, vector(n)); + + queue>q; + q.push({0,0}); + visited[0][0] = 1; + int step = D; + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + auto [x,y] = q.front(); + q.pop(); + for (auto& [dx, dy] : dir) + { + int i = x+dx, j = y+dy; + if (i<0||i>=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (visited[i][j]) continue; + if ((i!=m-1 || j!=n-1) && step+1 >= fire[i][j]) continue; + q.push({i,j}); + visited[i][j] = 1; + if (i==m-1 && j==n-1 && step+1 <= fire[m-1][n-1]) return true; + } + } + step++; + } + return false; + } + + vector>bfs(vector>&grid, int type) + { + int m = grid.size(); + int n = grid[0].size(); + vector>rets(m, vector(n,INT_MAX)); + queue>q; + + if (type==0) + { + q.push({0,0}); + rets[0][0] = 0; + } + else + { + for (int i=0; i=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if 
(rets[i][j]!=INT_MAX) continue; + rets[i][j] = step+1; + if (i!=m-1 || j!=n-1) + q.push({i,j}); + } + } + step++; + } + + return rets; + } +}; diff --git a/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v3.cpp b/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v3.cpp new file mode 100644 index 000000000..57c049894 --- /dev/null +++ b/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v3.cpp @@ -0,0 +1,112 @@ +class Solution { + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; +public: + int maximumMinutes(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + + vector>person = bfs(grid, 0); + vector>fire = bfs(grid, 1); + + if (person[m-1][n-1]==INT_MAX || person[m-1][n-1] > fire[m-1][n-1]) + return -1; + if (fire[m-1][n-1]==INT_MAX) + return 1e9; + + int left = 0, right = m*n; + while (left < right) + { + int mid = right-(right-left)/2; + if (checkOK(grid, fire, mid)) + left = mid; + else + right = mid-1; + } + if (checkOK(grid, fire, left)) return left; + else return -1; + } + + bool checkOK(vector>& grid, vector>fire, int D) + { + int m = grid.size(); + int n = grid[0].size(); + vector>visited(m, vector(n)); + + queue>q; + q.push({0,0}); + visited[0][0] = 1; + int step = D; + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + auto [x,y] = q.front(); + q.pop(); + for (auto& [dx, dy] : dir) + { + int i = x+dx, j = y+dy; + if (i<0||i>=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (visited[i][j]) continue; + if ((i!=m-1 || j!=n-1) && step+1 >= fire[i][j]) continue; + q.push({i,j}); + visited[i][j] = 1; + if (i==m-1 && j==n-1 && step+1 <= fire[m-1][n-1]) return true; + } + } + step++; + } + return false; + } + + vector>bfs(vector>&grid, int type) + { + int m = grid.size(); + int n = grid[0].size(); + vector>rets(m, vector(n,INT_MAX)); + queue>q; + + if (type==0) + { + q.push({0,0}); + rets[0][0] = 0; + } + else + { + for (int i=0; i=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (rets[i][j]!=INT_MAX) continue; + rets[i][j] = step+1; + if (i!=m-1 || j!=n-1) + q.push({i,j}); + } + } + step++; + } + + return rets; + } +}; diff --git a/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v4.cpp b/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v4.cpp new file mode 100644 index 000000000..072c97f72 --- /dev/null +++ b/BFS/2258.Escape-the-Spreading-Fire/2258.Escape-the-Spreading-Fire_v4.cpp @@ -0,0 +1,122 @@ +class Solution { + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; +public: + int maximumMinutes(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + + vector>person = bfs(grid, 0); + vector>fire = bfs(grid, 1); + + if (person[m-1][n-1]==INT_MAX || person[m-1][n-1] > fire[m-1][n-1]) + return -1; + if (fire[m-1][n-1]==INT_MAX) + return 1e9; + + vector>visited(m, vector(n)); + priority_queue>pq; + pq.push({fire[m-1][n-1], m-1, n-1}); + while (!pq.empty()) + { + auto [t, x, y] = pq.top(); + pq.pop(); + if (visited[x][y]) continue; + visited[x][y] = 1; + if (x==0 && y==0) return t; + + for (auto& [dx, dy] : dir) + { + int i = x+dx, j = y+dy; + if (i<0||i>=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (visited[i][j]) continue; + pq.push({min(t-1,fire[i][j]-1), i,j}); + } + } + return -1; + } + + bool checkOK(vector>& grid, vector>fire, int D) + { + int m = grid.size(); + int n = grid[0].size(); + vector>visited(m, vector(n)); + + queue>q; + q.push({0,0}); + visited[0][0] = 1; + int step = D; + while (!q.empty()) + { + int len = q.size(); + 
while (len--) + { + auto [x,y] = q.front(); + q.pop(); + for (auto& [dx, dy] : dir) + { + int i = x+dx, j = y+dy; + if (i<0||i>=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (visited[i][j]) continue; + if ((i!=m-1 || j!=n-1) && step+1 >= fire[i][j]) continue; + q.push({i,j}); + visited[i][j] = 1; + if (i==m-1 && j==n-1 && step+1 <= fire[m-1][n-1]) return true; + } + } + step++; + } + return false; + } + + vector>bfs(vector>&grid, int type) + { + int m = grid.size(); + int n = grid[0].size(); + vector>rets(m, vector(n,INT_MAX)); + queue>q; + + if (type==0) + { + q.push({0,0}); + rets[0][0] = 0; + } + else + { + for (int i=0; i=m||j<0||j>=n) continue; + if (grid[i][j]==2) continue; + if (rets[i][j]!=INT_MAX) continue; + rets[i][j] = step+1; + if (i!=m-1 || j!=n-1) + q.push({i,j}); + } + } + step++; + } + + return rets; + } +}; diff --git a/BFS/2258.Escape-the-Spreading-Fire/Readme.md b/BFS/2258.Escape-the-Spreading-Fire/Readme.md new file mode 100644 index 000000000..f6787845b --- /dev/null +++ b/BFS/2258.Escape-the-Spreading-Fire/Readme.md @@ -0,0 +1,46 @@ +### 2258.Escape-the-Spreading-Fire + +#### 解法1:2遍BFS +首先,我们容易走两遍BFS,得到两个新矩阵person和fire。其中person[i][j]表示人从起点到达(i,j)点的最短时间,fire[i][j]表示任意火源到达(i,j)点的最短时间。显然我们的关注点就在右下角的位置。我们容易依次做出这些判断: +1. ```person[m-1][n-1] == INT_MAX```,说明人永远到达不了终点,返回-1 +2. ```person[m-1][n-1] > fire[m-1][j]```,说明人永远不会比火更早到达终点,返回-1 +3. ```fire[m-1][n-1] == INT_MAX```,说明火永远不会达到终点,那么人可以慢慢走,返回1e9 +4. 剩下的情况,必然是```person[m-1][j]<=fire[m-1][n-1]```. 我们令```D = fire[m-1][j]-person[m-1][n-1]```表示人比火早到的天数。接下来针对这种情况做详细的分析。 + +首先,我们保守一点,如果让人停留D-1天再出发,那么人最终达到终点的时间,恰好是火到达终点的前一天,所以还是符合题意的。因此D-1至少是一个解。有人会问,停留D-1天出发,意味着原先人到终点的最短路径要全体抬升D-1,如果保证这条路径上所有的位置依然有```person[i][j]+D-1 < fire[i][j]```呢?事实上,如果停留D-1天,造成人与火同时到达中间某一个位置,那么火就可以借用这条“路径”,必然也会造成人与火同时到达终点。这与之前“人停留D-1天出发,依然会比火早一天到达终点”矛盾。因此这个担忧是不必要的。 + +接下来是分析的难点。本题虽然不允许中途的任何位置人与火同时到达,但是允许终点位置人与火可以同时到达。所以我们需要考虑再多停留一天,也就是D是否是可行的答案。表面上,人停留D天后,依然可以与火同时到达终点,但是我们发现,如果人与火share同一条路径时,是不合题意的。如下图 +``` + F + O +P O O O D +``` +P可以走4步到达终点D,F可以走5步到达终点D。但事实上,P不能停留一天再出发(即5-4)。因为停留的话,人与火会在非终点的位置相遇,这就不合条件。那么该如何判定,人最多是否可以停留D天再出发呢?这个结论比较难总结。答案是:人到终点的最快路径,和火到终点的最快路径,必须是从两个不同方向,即(m-2,n-1)或者(m-1,n-2)进入终点。如下图 + +``` + F + O +P O O O D +``` +P可以走4步到达终点D,F可以走2步到达终点D。这中情况下,人可以停留两天再出发(即4-2)。因为人与火的最优路径只在终点才相遇。 + +所以判定D是否能是答案的过程如下顺序: +1. 如果```fire[m-1][n-2]==fire[m-2][n-1]```,说明两个方向上火都是最快路径,人无法避开,所以D不能是答案。 +2. 如果```fire[m-1][n-2]fire[m-2][n-1]```,说明从上边来是火的最快路径,那么我们希望人的最快路径是从左边来,故需查看是否```person[m-1][n-2]==person[m-1][n-1]-1````。是的话返回D。 + +判定完以上之后,就返回D-1. + +#### 解法2:3遍BFS +在解法1中,我们的难点在于判断D是否是可行的解。其实有一个直观的做法,就是让人在起点停留D天,然后查看一下能否通过BFS顺利到达终点。BFS过程的要求就是人到任何一个中间位置(i,j)的时间,必须小于fire[i][j],否则就不能加入队列。如果最终能到达终点,就返回D,否则返回D-1. + +#### 解法3:二分搜值 +既然解法2中写了check函数来判断人停留D天后是否能到终点,那么我们索性就利用这个函数来二分搜值,找到最大的天数使得check的结果是OK。 + +#### 解法4:BFS+反向Dijkstra +在正向BFS求得fire矩阵之后,假设人到达终点的时间就是fire[m-1][n-1],然后反向从右下角开始Dijkstra,求得右下角到矩阵每个位置的最短路径。也就是说,我们可以求得一个矩阵,ret[i][j]表示人最晚什么时候到达(i,j),才能保证能在fire[m-1][n-1]时刻到达右下角。 + +举个例子,假设右下角的初始时刻是9,相邻的两个位置上有fire[m-1][n-2]是8,fire[m-2][n-1]是10;那么rets[m-1][n-2]就是7,fire[m-1][n-2]是8. Dijkstra的传播过程要遵守两个规则:随着BFS的过程,ret[i][j]必须逐步是变小;其次任何位置上,ret[i][j]必须小于fire[i][j]. 这也解释了为什么得用Dijkstra和PQ,而不是传统的BFS和队列,这是因为相邻两点之间的时间差不一定是1. + +最终的答案是ret[0][0]. 
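
A condensed sketch of the reverse-Dijkstra relaxation described in 解法4 above (a sketch only: it assumes `fire` has already been filled by the forward BFS, with `INT_MAX` for cells the fire never reaches; the function and variable names are illustrative):

```cpp
// Meaning of the values settled here: the latest moment you may stand at a cell
// and still reach (m-1,n-1) no later than fire[m-1][n-1]. A max-heap settles
// cells in decreasing order of that value, i.e. Dijkstra run backwards.
int latestStart(vector<vector<int>>& grid, vector<vector<int>>& fire)
{
    int m = grid.size(), n = grid[0].size();
    vector<pair<int,int>> dir = {{1,0},{-1,0},{0,1},{0,-1}};
    vector<vector<int>> visited(m, vector<int>(n, 0));
    priority_queue<array<int,3>> pq;           // {latest time, x, y}, max-heap
    pq.push({fire[m-1][n-1], m-1, n-1});
    while (!pq.empty())
    {
        auto [t, x, y] = pq.top();
        pq.pop();
        if (visited[x][y]) continue;
        visited[x][y] = 1;
        if (x == 0 && y == 0) return t;        // latest departure time from the start
        for (auto [dx, dy] : dir)
        {
            int i = x + dx, j = y + dy;
            if (i < 0 || i >= m || j < 0 || j >= n) continue;
            if (grid[i][j] == 2 || visited[i][j]) continue;
            // one extra step is needed to walk from (i,j) to (x,y), and (i,j)
            // itself must still be fire-free at that moment
            pq.push({min(t - 1, fire[i][j] - 1), i, j});
        }
    }
    return -1;
}
```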
+ diff --git a/BFS/2290.Minimum-Obstacle-Removal-to-Reach-Corner/2290.Minimum-Obstacle-Removal-to-Reach-Corner.cpp b/BFS/2290.Minimum-Obstacle-Removal-to-Reach-Corner/2290.Minimum-Obstacle-Removal-to-Reach-Corner.cpp new file mode 100644 index 000000000..10b6f1bb8 --- /dev/null +++ b/BFS/2290.Minimum-Obstacle-Removal-to-Reach-Corner/2290.Minimum-Obstacle-Removal-to-Reach-Corner.cpp @@ -0,0 +1,88 @@ +class Solution { + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; +public: + int minimumObstacles(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + if (m==1 && n==1) return 0; + vector>visited(m, vector(n,0)); + + queue>q; + q.push({0,0}); + visited[0][0] = 1; + + int step = 0; + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + auto [x, y] = q.front(); + q.pop(); + + for (auto [dx, dy]: dir) + { + int i = x+dx; + int j = y+dy; + + if (i<0||i>=m||j<0||j>=n) continue; + if (visited[i][j]==1) continue; + if (grid[i][j] == 1) + { + visited[i][j] = 1; + q.push({i,j}); + } + else + { + for (auto [ii, jj]: travel(grid, visited, i, j)) + { + if (ii==m-1 && jj==n-1) + return step; + q.push({ii,jj}); + } + } + } + } + step++; + } + return 0; + } + + vector>travel(vector>& grid, vector>& visited, int x0, int y0) + { + int m = grid.size(); + int n = grid[0].size(); + + if (x0==m-1 && y0==n-1) + return {{x0, y0}}; + + queue>q; + q.push({x0,y0}); + visited[x0][y0] = 1; + + vector>rets; + while (!q.empty()) + { + auto [x, y] = q.front(); + q.pop(); + + for (auto [dx, dy] : dir) + { + int i = x+dx; + int j = y+dy; + if (i<0||i>=m||j<0||j>=n) continue; + if (visited[i][j]==1) continue; + visited[i][j] = 1; + if (i==m-1 && j==n-1) + rets.push_back({i,j}); + else if (grid[i][j]==1) + rets.push_back({i,j}); + else + q.push({i,j}); + } + } + + return rets; + } +}; diff --git a/BFS/2290.Minimum-Obstacle-Removal-to-Reach-Corner/Readme.md b/BFS/2290.Minimum-Obstacle-Removal-to-Reach-Corner/Readme.md new file mode 100644 index 000000000..8c97b2875 --- /dev/null +++ b/BFS/2290.Minimum-Obstacle-Removal-to-Reach-Corner/Readme.md @@ -0,0 +1,5 @@ +### 2290.Minimum-Obstacle-Removal-to-Reach-Corner + +本题的本质就是从起点到终点,采用层级BFS,最少需要穿越几个回合的障碍。而障碍与障碍之间的空气,可以忽略不计。也就是说,某个障碍与空气相邻的话,下一个回合可以通过空气到达其他的障碍。 + +在实现过程中,除了常规的层级BFS之外,我们还需要有一个travelAir的函数。travelAir以某个空格子为起点,遍历所有能“隔空”访问的障碍物。这些障碍物需要加入下一回合BFS的队列中去。 diff --git a/BFS/2392.Build-a-Matrix-With-Conditions/2392.Build-a-Matrix-With-Conditions.cpp b/BFS/2392.Build-a-Matrix-With-Conditions/2392.Build-a-Matrix-With-Conditions.cpp new file mode 100644 index 000000000..0029321aa --- /dev/null +++ b/BFS/2392.Build-a-Matrix-With-Conditions/2392.Build-a-Matrix-With-Conditions.cpp @@ -0,0 +1,58 @@ +class Solution { +public: + vectortopo(int k, vector>& conditions) + { + vectorindegree(k+1); + vector>next(k+1); + for (auto& x: conditions) + { + next[x[0]].push_back(x[1]); + indegree[x[1]] += 1; + } + + queueq; + for (int i=1; i<=k; i++) + if (indegree[i]==0) + q.push(i); + + vectorrets; + while (!q.empty()) + { + int cur = q.front(); + q.pop(); + rets.push_back(cur); + for (auto x: next[cur]) + { + indegree[x]--; + if (indegree[x]==0) + q.push(x); + } + } + + if (rets.size() != k) return {}; + + return rets; + } + + vector> buildMatrix(int k, vector>& rowConditions, vector>& colConditions) + { + vectorrow = topo(k, rowConditions); + vectorcol = topo(k, colConditions); + + if (row.empty() || col.empty()) return {}; + + vector>pos(k+1); + for (int i=0; i>matrix(k, vector(k)); + for (int i=1; i<=k; i++) + matrix[pos[i].first][pos[i].second] = i; + + return matrix; + + } 
+}; diff --git a/BFS/2392.Build-a-Matrix-With-Conditions/Readme.md b/BFS/2392.Build-a-Matrix-With-Conditions/Readme.md new file mode 100644 index 000000000..c3ac372e2 --- /dev/null +++ b/BFS/2392.Build-a-Matrix-With-Conditions/Readme.md @@ -0,0 +1,7 @@ +### 2392.Build-a-Matrix-With-Conditions + +因为本题只需要在k by k的方阵中填写k个数字,所以我们可以让所有数字都不同行不同列。这样横向的拓扑关系和纵向的拓扑关系可以独立处理,互不干扰。 + +比如说,我们从横向的关系得到必须从左往右填写 a,b,c,d,再从纵向的关系得到必须从上往下填写b,d,c,a,那么我们就可以令a的纵坐标是0,横坐标是3. 以此类推。 + +特别注意,如果想用尽量小的的矩阵来填写k个数字,或者填写数字的个数大于矩阵的维度,那么此题难度会很大。比如,如果横向的拓扑关系是a,b,c必须在d的左边,这个时候我们就需要考虑a,b,c是否可以在同一列:但是这一列是否可以装下这么多元素?如果装不下,我们在这一列选填那几个? diff --git a/BFS/2440.Create-Components-With-Same-Value/2440.Create-Components-With-Same-Value.cpp b/BFS/2440.Create-Components-With-Same-Value/2440.Create-Components-With-Same-Value.cpp new file mode 100644 index 000000000..671b414de --- /dev/null +++ b/BFS/2440.Create-Components-With-Same-Value/2440.Create-Components-With-Same-Value.cpp @@ -0,0 +1,79 @@ +class Solution { +public: + int componentValue(vector& nums, vector>& edges) + { + int n = nums.size(); + if (n==1) return 0; + + vector>next(n); + vectorindegree(n); + + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + indegree[b]++; + indegree[a]++; + } + + int total = accumulate(nums.begin(), nums.end(), 0); + + vectorsums; + for (int s=1; s*s <= total; s++) + { + if (total % s!=0) continue; + sums.push_back(s); + sums.push_back(total/s); + } + sort(sums.begin(), sums.end()); + + for (auto s: sums) + { + vectorin = indegree; + queueq; + vectorvisited(n,0); + vectorsum = nums; + + for (int i=0; i s) + { + flag = false; + break; + } + else if (sum[cur] == s) + sum[cur] = 0; + + for (int nxt: next[cur]) + { + if (visited[nxt]) continue; + sum[nxt] += sum[cur]; + in[nxt]--; + + if (in[nxt]==1) + { + visited[nxt] = 1; + q.push(nxt); + } + } + } + + if (flag) return total/s-1; + } + + return 0; + } +}; diff --git a/BFS/2440.Create-Components-With-Same-Value/Readme.md b/BFS/2440.Create-Components-With-Same-Value/Readme.md new file mode 100644 index 000000000..32ff53f5d --- /dev/null +++ b/BFS/2440.Create-Components-With-Same-Value/Readme.md @@ -0,0 +1,10 @@ +### 2440.Create-Components-With-Same-Value + +假设我们想要把这张图平均分成k份,那么每一份联通块的元素和s我们是知道的。假设我们可以实现这样的拆分,从图中我们显然可以发现,每一份符合条件的联通块必然可以从外往内一点一点剥离下来。这就提示我们可以用拓扑排序的方法。我们记录sum[i]表示从外往内“剥洋葱”的过程中,剥到节点i时所对应的“子树”的节点元素之和。 +1. 如果`sum[i]==s`,那么说明这棵子树就是符合条件的一个联通块,我们就彻底剥离,节点i不传递信息给它的上级。 +2. 
如果`sum[i]s`,那么说明这棵子树的元素和太大了,不能构成一个合法的联通块,终止基于s的进一步的尝试。 + +我们重复上述剥洋葱的过程,如果把所有节点都剥完,依然没有报错,那么说明我们恰好把整张图剥离成了一个个元素和为s的子树。 + +显然,拓扑排序的复杂度是o(N)。那么我们需要尝试多少个s呢?s的个数应该是total的因子个数,即sqrt(total)。其中total是整张图的元素之和。综上,总的时间复杂度恰好就是1e6级别。 diff --git a/BFS/2473.Minimum-Cost-to-Buy-Apples/2473.Minimum-Cost-to-Buy-Apples.cpp b/BFS/2473.Minimum-Cost-to-Buy-Apples/2473.Minimum-Cost-to-Buy-Apples.cpp new file mode 100644 index 000000000..8cff24f9e --- /dev/null +++ b/BFS/2473.Minimum-Cost-to-Buy-Apples/2473.Minimum-Cost-to-Buy-Apples.cpp @@ -0,0 +1,44 @@ +using LL = long long; +using PII = pair; +class Solution { + vectornext[1005]; +public: + vector minCost(int n, vector>& roads, vector& appleCost, int k) + { + for (auto& road: roads) + { + int a = road[0], b = road[1], c = road[2]; + next[a].push_back({b,c}); + next[b].push_back({a,c}); + } + + vectorrets; + for (int i=1; i<=n; i++) + { + priority_queue, greater<>>pq; + pq.push({0, i}); + vectorvisited(n+1, -1); + while (!pq.empty()) + { + auto [dist, cur] = pq.top(); + pq.pop(); + if (visited[cur] == -1) + visited[cur] = dist; + + for (auto [nxt, len]: next[cur]) + { + if (visited[nxt]!=-1) continue; + pq.push({dist + len*(1+k), nxt}); + } + } + + LL ret = LLONG_MAX; + for (int i=1; i<=n; i++) + if (visited[i]!=-1) + ret = min(ret, appleCost[i-1]+visited[i]); + rets.push_back(ret); + } + + return rets; + } +}; diff --git a/BFS/2473.Minimum-Cost-to-Buy-Apples/Readme.md b/BFS/2473.Minimum-Cost-to-Buy-Apples/Readme.md new file mode 100644 index 000000000..051cb149a --- /dev/null +++ b/BFS/2473.Minimum-Cost-to-Buy-Apples/Readme.md @@ -0,0 +1,3 @@ +### 2473.Minimum-Cost-to-Buy-Apples + +无脑对每个起点都做一遍Dijkstra找到所有位置的最短路径即可。 diff --git a/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups.cpp b/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups.cpp new file mode 100644 index 000000000..f7e5b0c5d --- /dev/null +++ b/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups.cpp @@ -0,0 +1,63 @@ +class Solution { + vectornext[505]; +public: + int magnificentSets(int n, vector>& edges) + { + for (auto edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + + unordered_mapMap; + + for (int start=1; start<=n; start++) + { + int d = 0; + int smallestId = INT_MAX; + vectorlevel(505); + + queueq; + q.push(start); + level[start] = 1; + + while (!q.empty()) + { + d++; + int len = q.size(); + while (len--) + { + int cur = q.front(); + q.pop(); + smallestId = min(smallestId, cur); + + for (int nxt: next[cur]) + { + if (level[nxt] == 0) + { + level[nxt] = d+1; + q.push(nxt); + } + else if (level[nxt] == d) + { + return -1; + } + } + } + } + + Map[smallestId] = max(Map[smallestId], d); + } + + int ret = 0; + for (auto [k, v]: Map) + ret += v; + + return ret; + } +}; + + + + diff --git a/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups/Readme.md b/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups/Readme.md new file mode 100644 index 000000000..8fe7b181d --- /dev/null +++ b/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups/Readme.md @@ -0,0 +1,11 @@ +### 2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups + +本题的突破点是,只要我们能确定一个节点作为第一个group,那么剩下的节点该如何安排其实是可以贪心地确定的:很显然只要用BFS进行层级遍历即可,就像生成一棵树一样,把同属于一个层级的放入一个group,不停往下扩展,这样就可以得到最多的层级(也就是group)。 + 
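
A minimal sketch of this per-root level BFS (illustrative names; nodes are assumed 0-indexed here, and the same-level conflict test discussed in the next paragraph appears as the `level[nxt] == d` check):

```cpp
// Returns the number of levels (groups) obtained when `start` alone forms the
// first group, or -1 if two nodes of the same level are adjacent (no valid split).
int levelsFromRoot(int start, vector<vector<int>>& next)
{
    int n = next.size();
    vector<int> level(n, 0);
    queue<int> q;
    q.push(start);
    level[start] = 1;
    int d = 0;
    while (!q.empty())
    {
        d++;                              // nodes popped this round belong to group d
        int len = q.size();
        while (len--)
        {
            int cur = q.front(); q.pop();
            for (int nxt : next[cur])
            {
                if (level[nxt] == 0)      // unvisited: place it one level deeper
                {
                    level[nxt] = d + 1;
                    q.push(nxt);
                }
                else if (level[nxt] == d) // an edge inside one group: impossible
                    return -1;
            }
        }
    }
    return d;                             // number of groups when rooted at `start`
}
```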
+因为本题要求同一个group不能有边,所以我们需要检查一下这种方法得到的拓扑结构:是否有任何点指向了同一层级的其他点。有的话就标记BFS。特别注意的是,如果发现了这种情况,不仅意味着从当前根节点出发的BFS无解,也意味着整个连通图无解,即你从此连通图的任何一个位置作为根,都无法得到合法的层级结构。 + +因此,遍历起点的循环是V次,每次BFS需要至多访问E条边。总的时间复杂度是o(VE)恰好符合要求。 + +有人会问,以上的方法约定了第一个group只能有一个节点(看做是根)。可不可能有两个节点A与B都是处于第一个group的最优解呢?答案是不会更优。当A与B(第一层级)都和C(第二层级)联通时,我们其实按照之前的方法,会把B看做是第三个层级,显然这个方案能够得到更多的group。 + +此外,本题可能会有多个不同的联通区域。最终答案是每个联通区域所能构造出的最大group数量之和。一种处理方法是先用Union Find把不同联通区域的节点都标记出来,接着再遍历每个联通区域,变换根的位置去做BFS的尝试。另一种处理方法可以直接遍历每个节点作为根,BFS完之后记得将遇到的最小编号的节点作为联通区域的代号,最后我们将不同联通区域的答案再相加。 diff --git a/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries/2503.Maximum-Number-of-Points-From-Grid-Queries.cpp b/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries/2503.Maximum-Number-of-Points-From-Grid-Queries.cpp new file mode 100644 index 000000000..34b8511bc --- /dev/null +++ b/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries/2503.Maximum-Number-of-Points-From-Grid-Queries.cpp @@ -0,0 +1,47 @@ +using AI3 = array; +class Solution { +public: + vector maxPoints(vector>& grid, vector& queries) + { + vector>dir({{0,1},{0,-1},{1,0},{-1,0}}); + vector>qs; + for (int i=0; irets(queries.size()); + + priority_queue, greater<>>pq; + pq.push({grid[0][0], 0, 0}); + + int count = 0; + int m = grid.size(), n = grid[0].size(); + vector>visited(m, vector(n)); + visited[0][0] = 1; + + for (auto [q, idx]: qs) + { + while (!pq.empty() && pq.top()[0] < q) + { + int i = pq.top()[1], j = pq.top()[2]; + pq.pop(); + count++; + + for (int k=0; k<4; k++) + { + int x = i+dir[k].first; + int y = j+dir[k].second; + if (x<0||x>=m||y<0||y>=n) continue; + if (visited[x][y]) + continue; + + pq.push({grid[x][y],x,y}); + visited[x][y] = 1; + } + } + rets[idx] = count; + } + return rets; + } +}; diff --git a/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries/Readme.md b/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries/Readme.md new file mode 100644 index 000000000..7d291bded --- /dev/null +++ b/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries/Readme.md @@ -0,0 +1,5 @@ +### 2503.Maximum-Number-of-Points-From-Grid-Queries + +根据题意,如果query越小,那么我们能够扩展的范围越小。query越大,扩展的范围是单调地增大的。所以我们必然会将queries排序,优先处理更小的query,比如常规的BFS来遍历所有小于query的格子;然后再处理更大的query,尝试重复利用已经探索过的网格区域。 + +此时,我们就发现此题很像`778.Swim-in-Rising-Water `,一开始只能在一个较矮的水平面游泳。后来水平面提升了,必然可以淹过一些在边界处相对地势较低的格子,不断往外溢出。于是,我们只要在BFS的时候,将队列里将格子按照地势从低到高排列。如果队列首元素的海拔小于query,那么它就确认被淹了,它的邻接格子就会成为新的堤岸被加入队列做下一步的考察。直至队列的首元素大于等于query,就意味着我们BFS的进程中被当前的边界所围住,已经占据了从左上角开始可以联络到的所有小于query的位置。 diff --git a/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid.cpp b/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid.cpp new file mode 100644 index 000000000..d39f07620 --- /dev/null +++ b/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid.cpp @@ -0,0 +1,44 @@ +using AI3 = array; +class Solution { + vector>dir= {{0,1},{0,-1},{1,0},{-1,0}}; +public: + int minimumTime(vector>& grid) + { + if (grid[0][1]>1 && grid[1][0]>1) return -1; + + int m = grid.size(), n = grid[0].size(); + vector>arrival(m, vector(n,-1)); + + priority_queue, greater<>>pq; + if (grid[0][1]<=1) pq.push({1,0,1}); + if (grid[1][0]<=1) pq.push({1,1,0}); + + while (!pq.empty()) + { + auto [t,x,y] = pq.top(); + pq.pop(); + if (arrival[x][y]!=-1) + continue; + arrival[x][y] = t; + if (x==m-1 && y==n-1) + break; + + for (int k=0; k<4; k++) + { + int i = x+dir[k].first; + int j = y+dir[k].second; + if (i<0||i>=m||j<0||j>=n) continue; + if (arrival[i][j] != -1) continue; + + if (grid[i][j]<=t+1) 
+ pq.push({t+1, i, j}); + else if ((grid[i][j]-t)%2==0) + pq.push({grid[i][j]+1, i, j}); + else + pq.push({grid[i][j], i, j}); + } + } + + return arrival[m-1][n-1]; + } +}; diff --git a/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid/Readme.md b/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid/Readme.md new file mode 100644 index 000000000..a2ce41a08 --- /dev/null +++ b/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid/Readme.md @@ -0,0 +1,7 @@ +### 2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid + +如果本题没有“you must move to any adjacent cell”这个要求,那么套用Dijkstra算法的模板即可求得到达右下角的最短路径。其中任意两条相邻的格子a->b之间的边权就是`max(1, grid[b]-arrival[a]`,表示到达a之后,可以立即走一步到达b,或者原地等待到b的准入时刻再进入b。 + +以上算法的问题在于,我们不能在原地等待。假设我们到达a的时刻是3,但是其相邻的b点的准入时刻是5。显然,我们不能在时刻4的时候进入b,但我们可以再时刻5的时候进入b吗?其实也不能。我们唯一能拖延时间的方法就是从a走到一个相邻的格子再走回a,这样可以拖延两秒的时间,再进入b的时刻就是6. 同理我们可以发现,从a到任何与其相邻的格子b,考虑到“往复拖延”的策略,所需要的时间增量必然是+1,+3,+5,... 直至大于等于b的准入时刻。 + +所以我们只需要更新Dijkstra的部分代码。假设到达a的时刻是t,其相邻格子的准入时间是grid[b],那么如果`grid[b]<=t+1`,说明最早可以在t+1的时刻进入b;如果`(grid[b]-t)%2==1`,那么我们可以反复横跳之后恰好在grid[b]时刻进入b;否则我们需要在`grid[b]+1`时刻进入b。 diff --git a/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads/2662.Minimum-Cost-of-a-Path-With-Special-Roads_v1.cpp b/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads/2662.Minimum-Cost-of-a-Path-With-Special-Roads_v1.cpp new file mode 100644 index 000000000..659589a21 --- /dev/null +++ b/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads/2662.Minimum-Cost-of-a-Path-With-Special-Roads_v1.cpp @@ -0,0 +1,57 @@ +using LL = long long; +class Solution { + int dp[405][405]; +public: + int minimumCost(vector& start, vector& target, vector>& specialRoads) + { + specialRoads.push_back({start[0], start[1], target[0], target[1], abs(start[0]-target[0])+abs(start[1]-target[1])}); + + vector>point; + mapreverseMap; + for (int i=0; i; +class Solution { +public: + int minimumCost(vector& start, vector& target, vector>& specialRoads) + { + priority_queue, greater<>>pq; // {dist to node, node id} + pq.push({0, encode(start[0], start[1])}); + for (auto& road: specialRoads) + { + int x = road[2], y = road[3]; + pq.push({abs(start[0]-x)+abs(start[1]-y), encode(x,y)}); + } + + mapdist; + LL ret = INT_MAX; + while (!pq.empty()) + { + auto [len, id] = pq.top(); + pq.pop(); + if (dist.find(id)!=dist.end()) continue; + dist[id] = len; + auto [x,y] = decode(id); + + ret = min(ret, len + abs(x-target[0])+abs(y-target[1])); + + for (auto& road: specialRoads) + { + int x1 = road[0], y1 = road[1]; + int x2 = road[2], y2 = road[3]; + int cost = road[4]; + LL id2 = encode(x2,y2); + + if (dist.find(id2)==dist.end()) + pq.push({len + abs(x-x1)+abs(y-y1) + cost, id2}); + } + + } + + return ret; + } + + LL encode(LL x, LL y) {return (x<<32) + y;} + PLL decode(LL id) {return {id>>32, id%(1LL<<32)};} +}; diff --git a/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads/Readme.md b/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads/Readme.md new file mode 100644 index 000000000..ab5677766 --- /dev/null +++ b/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads/Readme.md @@ -0,0 +1,13 @@ +### 2662.Minimum-Cost-of-a-Path-With-Special-Roads + +#### 解法1:Floyd +考虑到有200条road,意味着400个点。用n^3的floyd算法,也许可以在时间范围内勉强求得任意两点之间的最短距离。我们只需要在special roads里加一条从start到target的曼哈顿距离,就可以构图套用模板了。 + +注意本题需要将点去重,否则会TLE。 + +注意,本题的初始化包括:1.同一点的距离是0 2.任意两点之间的距离有曼哈顿距离保底 3.road的两点之间的距离可以更新为cost。 + +#### 解法2:Dijkstra +对于每条special road,它的起点其实都是无关紧要的,保底用start到其曼哈顿距离即可。只有这些special road的终点才是改变这张图拓扑关系的关键点(否则永远都是trivial的网格结构)。所以我们可以用Dijkstra算法,来更新start到各个road终点的最短距离。最后在所有的终点x里,挑一个最小的`start->x->target`的距离,其中`x->target`是曼哈顿距离。 + 
+更具体地,我们从pq里弹出当前某点p的最短距离len,那么我们就可以利用从x到y的road,更新从p到y的距离:`len + abs|p-x| + cost`. diff --git a/BFS/2714.Find-Shortest-Path-with-K-Hops/2714.Find-Shortest-Path-with-K-Hops.cpp b/BFS/2714.Find-Shortest-Path-with-K-Hops/2714.Find-Shortest-Path-with-K-Hops.cpp new file mode 100644 index 000000000..ceaf49038 --- /dev/null +++ b/BFS/2714.Find-Shortest-Path-with-K-Hops/2714.Find-Shortest-Path-with-K-Hops.cpp @@ -0,0 +1,37 @@ +using AI3 = array; +using PII = pair; +class Solution { + vectornext[500]; +public: + int shortestPathWithHops(int n, vector>& edges, int source, int destination, int k) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1], w = edge[2]; + next[a].push_back({b,w}); + next[b].push_back({a,w}); + } + priority_queue, greater<>>pq; + pq.push({0, source, k}); + + vector>dist(n, vector(k+1, INT_MAX/2)); + + while (!pq.empty()) + { + auto [d, cur, t] = pq.top(); + pq.pop(); + if (dist[cur][t]!=INT_MAX/2) continue; + dist[cur][t] = d; + if (cur==destination) return d; + + for (auto [nxt, weight]:next[cur]) + { + if (dist[nxt][t]==INT_MAX/2) + pq.push({d+weight, nxt, t}); + if (t>=1 && dist[nxt][t-1]==INT_MAX/2) + pq.push({d, nxt, t-1}); + } + } + return -1; + } +}; diff --git a/BFS/2714.Find-Shortest-Path-with-K-Hops/Readme.md b/BFS/2714.Find-Shortest-Path-with-K-Hops/Readme.md new file mode 100644 index 000000000..e133f601f --- /dev/null +++ b/BFS/2714.Find-Shortest-Path-with-K-Hops/Readme.md @@ -0,0 +1,5 @@ +### 2714.Find-Shortest-Path-with-K-Hops + +此题和`2093.Minimum-Cost-to-Reach-City-With-Discounts`几乎一样。我们用Dijkstra求最短距离时需要有两个参量,即`dist[node][hops]`表示还剩hops机会时node离原点的最短距离。当某状态向量`(dist, node, hops)`弹出队列时,我们可以加入两种相邻的状态`{dist+weight, nxt, hops}`或者`{dist, nxt, hops-1}`. + +注意当PQ第一次弹出destination时,无论hops是多少,即可以输出最短距离。 diff --git a/BFS/2812.Find-the-Safest-Path-in-a-Grid/2812.Find-the-Safest-Path-in-a-Grid.cpp b/BFS/2812.Find-the-Safest-Path-in-a-Grid/2812.Find-the-Safest-Path-in-a-Grid.cpp new file mode 100644 index 000000000..d1b8b8dc8 --- /dev/null +++ b/BFS/2812.Find-the-Safest-Path-in-a-Grid/2812.Find-the-Safest-Path-in-a-Grid.cpp @@ -0,0 +1,78 @@ +using PII = pair; +class Solution { +public: + vectordir = {{0,1},{0,-1},{1,0},{-1,0}}; + int maximumSafenessFactor(vector>& grid) + { + int n = grid.size(); + + queueq; + for (int i=0; i=n||j<0||j>=n) continue; + if (grid[i][j]!=0) continue; + grid[i][j] = grid[x][y]+1; + q.push({i,j}); + } + } + } + + int left = 0, right = n; + while (left < right) + { + int mid = right-(right-left)/2; + if (isOK(mid, grid)) + left = mid; + else + right = mid-1; + } + + return left; + } + + bool isOK(int d, vector>& grid) + { + int n = grid.size(); + vector>visited(n, vector(n, 0)); + + if (grid[0][0]<=d) return false; + + queueq; + q.push({0,0}); + visited[0][0] = 1; + + while (!q.empty()) + { + auto [x,y] = q.front(); + q.pop(); + for (int k=0; k<4; k++) + { + int i = x+dir[k].first; + int j = y+dir[k].second; + if (i<0||i>=n||j<0||j>=n) continue; + if (grid[i][j]<=d) continue; + if (visited[i][j]) continue; + + visited[i][j] = 1; + if (i==n-1 && j==n-1) return true; + q.push({i,j}); + } + } + + return false; + } +}; diff --git a/BFS/2812.Find-the-Safest-Path-in-a-Grid/Readme.md b/BFS/2812.Find-the-Safest-Path-in-a-Grid/Readme.md new file mode 100644 index 000000000..e6a9d4540 --- /dev/null +++ b/BFS/2812.Find-the-Safest-Path-in-a-Grid/Readme.md @@ -0,0 +1,5 @@ +### 2812.Find-the-Safest-Path-in-a-Grid + +我们预先处理grid,通过多源BFS,求出每个格子到离其最近的thief的距离grid[i][j]。为了便于处理grid里已经存在数值为1的格子,在这里我们填充grid[i][j]表示该点"离最近的thief的距离+1". 
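
A minimal sketch of just this preprocessing pass (names are illustrative; it assumes `grid` is the problem's 0/1 square matrix and overwrites it in place, following the convention above of storing "distance to the nearest thief + 1"):

```cpp
// Multi-source BFS: every thief cell (value 1) is a source whose stored value is 1;
// afterwards grid[i][j] == (distance to the nearest thief) + 1 for every cell.
void fillThiefDistance(vector<vector<int>>& grid)
{
    int n = grid.size();
    vector<pair<int,int>> dir = {{0,1},{0,-1},{1,0},{-1,0}};
    queue<pair<int,int>> q;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            if (grid[i][j] == 1)          // seed the queue with all thieves at once
                q.push({i, j});
    while (!q.empty())
    {
        auto [x, y] = q.front();
        q.pop();
        for (auto [dx, dy] : dir)
        {
            int i = x + dx, j = y + dy;
            if (i < 0 || i >= n || j < 0 || j >= n) continue;
            if (grid[i][j] != 0) continue;   // already assigned, or a thief cell
            grid[i][j] = grid[x][y] + 1;
            q.push({i, j});
        }
    }
}
```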
+ +然后我们二分搜值这个safety factor。假设是d,那么我们尝试寻找一条从左上到右下的通路,使得该路径不能包含有grid[i][j]<=d的格子,再走一次bfs即可判断。然后根据判断值,不断调整d的大小直至收敛。 diff --git a/BFS/3552.Grid-Teleportation-Traversal/3552.Grid-Teleportation-Traversal.cpp b/BFS/3552.Grid-Teleportation-Traversal/3552.Grid-Teleportation-Traversal.cpp new file mode 100644 index 000000000..d18d7b1e5 --- /dev/null +++ b/BFS/3552.Grid-Teleportation-Traversal/3552.Grid-Teleportation-Traversal.cpp @@ -0,0 +1,67 @@ +using pii = pair; +const int INF = 1e9; + +class Solution { +public: + int minMoves(vector& grid) + { + int m = grid.size(), n = grid[0].size(); + vector> dist(m, vector(n, INF)); + deque dq; + + vector> portals(26); + for(int i = 0; i < m; i++) + { + for(int j = 0; j < n; j++){ + char c = grid[i][j]; + if(c >= 'A' && c <= 'Z') + portals[c - 'A'].push_back({i, j}); + } + } + vector used(26, false); + + dist[0][0] = 0; + dq.push_back({0, 0}); + + int dirs[4][2] = {{1,0},{-1,0},{0,1},{0,-1}}; + + while(!dq.empty()) + { + auto [x,y] = dq.front(); + dq.pop_front(); + int cd = dist[x][y]; + if(x == m-1 && y == n-1) + return cd; + + char c = grid[x][y]; + if(c >= 'A' && c <= 'Z') + { + int idx = c - 'A'; + if(!used[idx]) + { + used[idx] = true; + for(auto [nx, ny] : portals[idx]) + { + if(dist[nx][ny] > cd) { + dist[nx][ny] = cd; + dq.push_front({nx, ny}); + } + } + } + } + + for(auto &d : dirs) + { + int nx = x + d[0], ny = y + d[1]; + if(nx < 0 || nx >= m || ny < 0 || ny >= n) continue; + if(grid[nx][ny] == '#') continue; + if(dist[nx][ny] > cd + 1) { + dist[nx][ny] = cd + 1; + dq.push_back({nx, ny}); + } + } + } + + return -1; + } +}; diff --git a/BFS/3552.Grid-Teleportation-Traversal/Readme.md b/BFS/3552.Grid-Teleportation-Traversal/Readme.md new file mode 100644 index 000000000..14c4e7b4d --- /dev/null +++ b/BFS/3552.Grid-Teleportation-Traversal/Readme.md @@ -0,0 +1,3 @@ +### 3552.Grid-Teleportation-Traversal + +这是一个典型的用deque的BFS。因为题目中有“瞬移”的路径,假设是A到B,那么我们从队列中弹出A之后,不能将B压入队列的尾部,这样会影响找到最短路径的效率。因为A到B之间是不计时间的,我们将B的状态放在队首即可,故需要双端队列做这个容器。 diff --git a/BFS/3568.Minimum-Moves-to-Clean-the-Classroom/3568.Minimum-Moves-to-Clean-the-Classroom.cpp b/BFS/3568.Minimum-Moves-to-Clean-the-Classroom/3568.Minimum-Moves-to-Clean-the-Classroom.cpp new file mode 100644 index 000000000..7a5307a84 --- /dev/null +++ b/BFS/3568.Minimum-Moves-to-Clean-the-Classroom/3568.Minimum-Moves-to-Clean-the-Classroom.cpp @@ -0,0 +1,74 @@ +using state = tuple; +class Solution { +public: + int minMoves(vector& grid, int energy) { + int m = grid.size(), n = grid[0].size(); + + pairstart; + vector>litter_pos; + vector>litter_idx(m, vector(n,-1)); + + for (int i=0; i>>> visited( + m, vector>>( + n, vector>(energy + 1, vector(1 << L, false)))); + + visited[start.first][start.second][energy][0] = true; + + vector>dir={{0,1},{0,-1},{1,0},{-1,0}}; + + queue q; + q.push({start.first, start.second, energy, 0}); + int step = 0; + + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + auto [x,y,e,mask] = q.front(); + q.pop(); + if (mask==(1<=m||ny<0||ny>=n) continue; + + char cell = grid[nx][ny]; + if (cell=='X') continue; + + int newEnergy = e-1; + if (newEnergy < 0) continue; + if (cell == 'R') newEnergy = energy; + + int newMask = mask; + if (grid[nx][ny]=='L') + { + int idx = litter_idx[nx][ny]; + newMask |= (1< o.dist; + } +}; + +class Solution { +public: + double minTime(int n, int k, int m, vector& time, vector& mul) { + vector>dist(1<(m, 1e300)); + priority_queuepq; + dist[0][0] = 0; + pq.push({0, 0, 0.0}); + int END = (1< dist[mask][stage]) continue; + if (mask==END) return d0; + + 
int rem = (~mask)&END; + for (int sub = rem; sub>0; sub = (sub-1)&rem) { + if (__builtin_popcount(sub)>k) continue; + int mx = 0; + for (int i=0; i o.dist; + } +}; +``` +注意对于运算符<的重载,这样我们定义`priority_queue`的时候,就会自动将dist最小的state排在top。 + +每次从PQ里拿出一组{mask, stage, d},我们需要在mask的补集(rem)里面枚举子集sub。将sub对应的人渡河之后(此时状态更新为`mask2=mask+sub`),再在“所有已经渡河的人”里(即mask2)选一个人i将传开回来,由此得到该回合最终的`mask3=mask2-(1< o.time; + } +}; +class Solution { + vector>next[100005]; +public: + int minTime(int n, vector>& edges) { + for (auto& e:edges) { + int u=e[0], v=e[1], start=e[2], end=e[3]; + next[u].push_back({v, start, end}); + } + + vectorvisited(n, -1); + priority_queuepq; + pq.push({0,0}); + + while (!pq.empty()) { + auto [node, time] = pq.top(); + // cout<s+1)) { + pq.push({v, s+1}); + } + if (time >= s && time <=e && (visited[v]==-1 || visited[v]>time+1)) { + pq.push({v, time+1}); + } + } + } + + return -1; + } +}; diff --git a/BFS/3604.Minimum-Time-to-Reach-Destination-in-Directed-Graph/Readme.md b/BFS/3604.Minimum-Time-to-Reach-Destination-in-Directed-Graph/Readme.md new file mode 100644 index 000000000..cadb887a1 --- /dev/null +++ b/BFS/3604.Minimum-Time-to-Reach-Destination-in-Directed-Graph/Readme.md @@ -0,0 +1,12 @@ +### 3604.Minimum-Time-to-Reach-Destination-in-Directed-Graph + +常规的Dijkstra的模版题。本题优先队里的元素需要定义状态 +```cpp +struct state { + int node, time; + bool operator<(state const& o) const { + return time > o.time; + } +}; +``` +对于出队列的一个状态{node,time},我们可知到达node的最短时间就是time。然后我们检查它周围的路径{v,start,end}。如果time& nums) { + int MAXA = *max_element(nums.begin(), nums.end()); + vectorspf(MAXA+1, 0); + for (int i=2; i<=MAXA;i++) { + if (spf[i]) continue; + for (int j=i; j<=MAXA; j+=i) + if (!spf[j]) spf[j]=i; + } + + vector>prime_to_idx(MAXA+1); + int n = nums.size(); + for (int i=0; i1) { + int p = spf[x]; + prime_to_idx[p].push_back(i); + while (x%p==0) x/=p; + } + } + + const int INF = 1e9; + vectordist(n, INF); + vectorused_prime(MAXA+1, 0); + dequeq; + dist[0] = 0; + q.push_back(0); + + while (!q.empty()) { + int i = q.front(); + q.pop_front(); + int d = dist[i]; + if (i==n-1) + return d; + + if (i+1=0 && dist[i-1]==INF) { + dist[i-1] = d+1; + q.push_back(i-1); + } + int x = nums[i]; + if (x>1 && spf[x]==x) { + if (used_prime[x]) continue; + used_prime[x] = 1; + for (int j: prime_to_idx[x]) { + if (dist[j]==INF) { + dist[j] = d+1; + q.push_back(j); + } + } + } + } + + return 0; + } +}; diff --git a/BFS/3629.Minimum-Jumps-to-Reach-End-via-Prime-Teleportation/Readme.md b/BFS/3629.Minimum-Jumps-to-Reach-End-via-Prime-Teleportation/Readme.md new file mode 100644 index 000000000..67a2810af --- /dev/null +++ b/BFS/3629.Minimum-Jumps-to-Reach-End-via-Prime-Teleportation/Readme.md @@ -0,0 +1,7 @@ +### 3629.Minimum-Jumps-to-Reach-End-via-Prime-Teleportation + +很容易判断,总体框架必然是BFS。 + +此题额外要求预处理nums里的每个质数与其倍数之间的映射关系,存入prime_to_idx中。一个高效的做法是在用埃氏筛判定1到M内的所有质数时,顺便记录下每个自然数的最小质因数(spf)。这样就方便我们对于nums的每个元素x做快速的质因数分解(不断去除以spf[x]),从而建立起它的所有质因子p到x的映射集合。 + +BFS的写法比较常规。从队列里弹出index=i后,考察是否需要将`i+1`, `i-1`以及`prime_to_idx[nums[i]]`(仅当nums[i]是质数时)放入队列。注意对于已经处理过的质数需要略过。 diff --git a/BFS/490.The-Maze/490.The-Maze.cpp b/BFS/490.The-Maze/490.The-Maze.cpp new file mode 100644 index 000000000..accdb5d23 --- /dev/null +++ b/BFS/490.The-Maze/490.The-Maze.cpp @@ -0,0 +1,49 @@ +class Solution { + int M,N; + vector> dir = {{1,0},{-1,0},{0,1},{0,-1}}; + +public: + bool hasPath(vector>& maze, vector& start, vector& destination) + { + if (start==destination) return true; + + M = maze.size(); + N = maze[0].size(); + + auto visited=vector>(M,vector(N,0)); + + queue>q; 
+ q.push({start[0],start[1]}); + visited[start[0]][start[1]] = 1; + + while (!q.empty()) + { + int x0 = q.front().first; + int y0 = q.front().second; + q.pop(); + + for (int k=0; k<4; k++) + { + auto [x,y] = nextPos(maze,x0,y0,k); + if (x==destination[0] && y==destination[1]) return true; + if (visited[x][y]==1) continue; + visited[x][y]=1; + q.push({x,y}); + } + } + return false; + } + + pair nextPos(vector>& maze, int x0, int y0, int k) + { + int x = x0, y = y0; + while (x>=0 && x=0 && y; +using TP = tuple; class Solution { -private: + vector>dir = {{1,0},{0,-1},{0,1},{-1,0}}; // d,l,r,u int m,n; - vector> dir={{1,0},{0,-1},{0,1},{-1,0}}; - public: - int Next(int i, int j, vector>& maze, int k, vector& hole) - { - int steps=0; - while (i+dir[k].first>=0 && i+dir[k].first=0 && j+dir[k].second>& maze, vector& ball, vector& hole) { - m = maze.size(); - n = maze[0].size(); - - vector>dist(m, vector(n, INT_MAX)); - vector>inst(m, vector(n, "z")); + m = maze.size(), n = maze[0].size(); - priority_queue, greater<>> pq; + priority_queue, greater<>>pq; // {dist, string, node_x, node_y} pq.push({0, "", ball[0], ball[1]}); + vector>dist(m, vector(n,INT_MAX)); + string ret; + while (!pq.empty()) { auto [d, s, x, y] = pq.top(); pq.pop(); - if (d > dist[x][y]) continue; - dist[x][y] = d; - inst[x][y] = min(inst[x][y], s); - - if (x==hole[0] && y==hole[1]) continue; + if (d > dist[x][y]) continue; + else dist[x][y] = d; + + if (x==hole[0] && y==hole[1]) + { + ret = s; + break; + } for (int k=0; k<4; k++) - { - int step = Next(x,y,maze,k,hole); - int i = x+dir[k].first*step; - int j = y+dir[k].second*step; - - char ch='0'+k; - if (d+step > dist[i][j]) continue; - if (d+step == dist[i][j] && (s+ch) >= inst[i][j]) continue; + { + int step = Next(x,y,k,maze, hole); + int i = x + dir[k].first * step; + int j = y + dir[k].second * step; + + char ch = '0'+k; + if (d+step >= dist[i][j]) continue; pq.push({d+step, s+ch, i, j}); - } + } } - if (dist[hole[0]][hole[1]]==INT_MAX) - return "impossible"; - - string ret = inst[hole[0]][hole[1]]; for (int i=0; i>& maze, vector& hole) + { + int step = 0; + while (x+dir[k].first >= 0 && x+dir[k].first < m && y+dir[k].second >= 0 && y+dir[k].second < n && maze[x+dir[k].first][y+dir[k].second]!=1) + { + step++; + x+=dir[k].first; + y+=dir[k].second; + if (x==hole[0] && y==hole[1]) + break; + } + return step; } }; diff --git a/BFS/499.The-Maze-III/Readme.md b/BFS/499.The-Maze-III/Readme.md index bac76a9da..fd7c33027 100644 --- a/BFS/499.The-Maze-III/Readme.md +++ b/BFS/499.The-Maze-III/Readme.md @@ -1,7 +1,7 @@ ### 499.The-Maze-III -此题在505.The-Maze-II的基础上增加了一个条件:在最短距离相同的情况下,要更新为字典序最小的instruction。 +此题在505.The-Maze-II的基础上增加了一个条件:在最短距离相同的情况下,要更新为字典序最小的指令。 -LC 505是单纯地求最短路径,所以无脑使用Dijkstra算法:任何一个节点(记做X)第一次被弹出PQ时,所对应的dist就是该点X到原点的最短距离。当X在第二次或之后被弹出PQ时可以直接忽视。就算不直接忽视,你校验一下路径距离,肯定也不会更优。所以Dijkstra算法的最大好处就是有机会提前终止(如果只考察目标点target)。 +事实上本题与505并没有太大的区别,只不过优先队列的比较函数里增加一个条件:当(离源点)路径距离相等的时候,将路径指令的字典序更小的节点排在更前面。这样当终点第一次从优先队列里弹出的时候,不仅对应的距离最短,而且如果有多个相同距离的路径的话,指令也是最小的。 -但是本题则略微不同。当X第二次被弹出PQ时,有可能路径与之前登记的相同、但是instruction的字典序更优。在这种情况下,我们就不能直接忽视这个X,而是需要主动检验,如果满足“路径相等并且instruction更优”,就要更新关于X点的记录,并且必须将从X出发的邻接节点再次放入PQ里面。所以在本题里,我们必须让整个PQ主动跑完(变成空),而不能遇到target时就终止程序。 +有人会疑问,当终点第一次从队列里弹出的时候,固然对应着距离最短的路径,那么怎么保证所有该有着相同最短距离、但指令不同的路径都已经进入队列了呢?这是因为,假设终点第一次弹出队列所对应的路径长度是x,那么此时所有路径长度小于x的路径必然已经都弹出队列了,他们的后续路径必然包括了所有以距离x达到终点的路径。 diff --git a/Binary_Search/1102.Path-With-Maximum-Minimum-Value/1102.Path-With-Maximum-Minimum-Value.cpp 
b/Binary_Search/1102.Path-With-Maximum-Minimum-Value/1102.Path-With-Maximum-Minimum-Value_v1.cpp similarity index 53% rename from Binary_Search/1102.Path-With-Maximum-Minimum-Value/1102.Path-With-Maximum-Minimum-Value.cpp rename to Binary_Search/1102.Path-With-Maximum-Minimum-Value/1102.Path-With-Maximum-Minimum-Value_v1.cpp index 687cb3831..e45f394ed 100644 --- a/Binary_Search/1102.Path-With-Maximum-Minimum-Value/1102.Path-With-Maximum-Minimum-Value.cpp +++ b/Binary_Search/1102.Path-With-Maximum-Minimum-Value/1102.Path-With-Maximum-Minimum-Value_v1.cpp @@ -1,23 +1,13 @@ class Solution { public: - int maximumMinimumPath(vector>& A) + int maximumMinimumPath(vector>& grid) { - int left = INT_MAX; - int right = INT_MIN; - int M = A.size(); - int N = A[0].size(); - for (int i=0; i> A, int K) + bool check(vector> grid, int K) { - if (A[0][0]>({{1,0},{-1,0},{0,1},{0,-1}}); - int M = A.size(); - int N = A[0].size(); + int M = grid.size(), N = grid[0].size(); queue>q; q.push({0,0}); - A[0][0] = -1; + vector>visited(M, vector(N)); + visited[0][0] = 1; while (q.size()>0) { @@ -47,19 +37,16 @@ class Solution { { int i = x+dir[k].first; int j = y+dir[k].second; - if (i<0||i>=M||j<0||j>=N) - continue; - if (A[i][j]==-1) - continue; - if (A[i][j]=M||j<0||j>=N) continue; + if (visited[i][j]) continue; + if (grid[i][j]; +class Solution { + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; +public: + int maximumMinimumPath(vector>& grid) + { + int M = grid.size(); + int N = grid[0].size(); + priority_queuepq; + + pq.push({grid[0][0], 0,0}); + vector>visited(M, vector(N,0)); + vector>rets(M, vector(N,0)); + + while (pq.size()>0) + { + auto [d, x, y] = pq.top(); + pq.pop(); + if (visited[x][y]==1) continue; + rets[x][y] = d; + visited[x][y] = 1; + if (x==M-1 && y==N-1) + return d; + + for (int k=0; k<4; k++) + { + int i = x+dir[k].first; + int j = y+dir[k].second; + if (i<0||i>=M||j<0||j>=N) + continue; + pq.push({min(d, grid[i][j]), i, j}); + } + } + + return -1; + } +}; diff --git a/Binary_Search/1102.Path-With-Maximum-Minimum-Value/Readme.md b/Binary_Search/1102.Path-With-Maximum-Minimum-Value/Readme.md index 14679ddcf..b76823958 100644 --- a/Binary_Search/1102.Path-With-Maximum-Minimum-Value/Readme.md +++ b/Binary_Search/1102.Path-With-Maximum-Minimum-Value/Readme.md @@ -1,11 +1,11 @@ ### 1102.Path-With-Maximum-Minimum-Value -此题是二分法的非常精彩的应用. +#### 解法1:二分搜索 -我们想,如果这个maximum score是x,那么意味着存在一条路径,里面不能有任何小于x的格子.因此,如果给定x,我们尝试用BFS的方法从左上角走到右下角.如果能抵达,说明至少存在一条成功的路,他们所有的元素都不会小于x,而且x还可能有提升的空间.相反,如果不能走到,说明从左上到右下被一些列小于x的各自给阻断了,因此我们对于maximum score的预期必须下调,至少得小于x. +我们想,如果这个maximum score是x,那么意味着存在一条路径,里面不能有任何小于x的格子。因此,如果给定x,我们尝试用BFS的方法从左上角走到右下角,约定不能经过任何小于x的格子。如果能抵达,说明至少存在一条成功的路,其沿途元素都不会小于x,故x是一个可行解,我们可以调高对maximum score的预期。相反,如果不能走到,说明从左上到右下肯定会被一系列小于x的格子给阻断了,因此我们对于maximum score的预期必须下调,至少得小于x. 所以二分的策略就非常清楚了: -``` +```cpp while (left& bloomDay, int m, int k) { int n = bloomDay.size(); - if (n= 3)```。原因是区间大学小于3的时候无法成功三分区间。 + +[Leetcode link](https://leetcode.com/problems/find-the-index-of-the-large-integer/) diff --git a/Binary_Search/1608.Special-Array-With-X-Elements-Greater-Than-or-Equal-X/Readme.md b/Binary_Search/1608.Special-Array-With-X-Elements-Greater-Than-or-Equal-X/Readme.md index ef6facc01..06e76f214 100644 --- a/Binary_Search/1608.Special-Array-With-X-Elements-Greater-Than-or-Equal-X/Readme.md +++ b/Binary_Search/1608.Special-Array-With-X-Elements-Greater-Than-or-Equal-X/Readme.md @@ -9,7 +9,7 @@ 这样的时间复杂度就是 ```log(N)*N```. 
-对于有唯一确定解的题目(注意不是有唯一最优解),除了可以沿用我一贯推荐的```while(left<=right)```模板之外,我们可以用这样的模板: +对于有唯一确定解的题目(注意不是有唯一最优解),除了可以沿用我一贯推荐的```while(left& batteries) { - LL left = 0, right = LLONG_MAX/2; - // sort(batteries.rbegin(), batteries.rend()); - + LL left = 0, right = LLONG_MAX/n; while (left < right) { LL mid = right-(right-left)/2; - if (checkOK(mid, batteries, n)) - left = mid; + if (checkOK(mid, n, batteries)) + left = mid; else - right = mid-1; + right = mid-1; } - return left; + return left; } - bool checkOK(LL T, vector&nums, int n) + bool checkOK(LL T, LL n, vector& batteries) { - int count = 0; - LL cur = 0; - for (int i=0; i= T) - { - count++; - cur-=T; - } - if (count >= n) + sum += min((LL)x, T); + if (sum >= T*n) return true; } return false; } }; + diff --git a/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children/2226.Maximum-Candies-Allocated-to-K-Children.cpp b/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children/2226.Maximum-Candies-Allocated-to-K-Children.cpp new file mode 100644 index 000000000..3b6ca33c3 --- /dev/null +++ b/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children/2226.Maximum-Candies-Allocated-to-K-Children.cpp @@ -0,0 +1,33 @@ +using LL = long long; +class Solution { +public: + int maximumCandies(vector& candies, long long k) + { + LL total = 0; + for (auto x: candies) + total += (LL)x; + + LL left= 0, right = total/k; + while (left < right) + { + LL mid = right - (right - left) / 2; + if (checkOK(candies, mid, k)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool checkOK(vector& candies, LL numPerPile, LL k) + { + LL count = 0; + for (LL x: candies) + { + count += x / numPerPile; + if (count >= k) + return true; + } + return false; + } +}; diff --git a/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children/Readme.md b/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children/Readme.md new file mode 100644 index 000000000..5535a9e03 --- /dev/null +++ b/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children/Readme.md @@ -0,0 +1,9 @@ +### 2226.Maximum-Candies-Allocated-to-K-Children + +这是一道非常明显的二分搜值的题目。 + +我们令每个孩子可以分得的糖果数目记做x。如果x很大,意味着我们可以构造的、符合条件(即每堆恰有x个糖果)的堆数会变少,极有可能最终不够k堆。反之,如果x很小,意味着我们可以构造出更多的、符合条件的堆数,甚至超过k堆,导致这个答案不够优秀。所以我们可以通过二分搜索尝试不同的x的值,来逼近最大的x,使得构造出的堆数恰好大于等于k。 + +对于给定的x,将每堆的糖果个数除以x,就是该堆可以拆分出的、符合条件的堆数。我们只需检验符合条件的总堆数是否大于等于k即可。 + +此题和```1891. Cutting Ribbons```一模一样. 
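
As a quick sanity check of the feasibility test described above (numbers chosen purely for illustration): with `candies = [5,8,6]` and `k = 3`, guessing `x = 5` gives `5/5 + 8/5 + 6/5 = 1 + 1 + 1 = 3 >= 3` piles, so `x = 5` is feasible; guessing `x = 6` gives `0 + 1 + 1 = 2 < 3` piles, so it is not, and the binary search converges on 5.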
diff --git a/Binary_Search/2387.Median-of-a-Row-Wise-Sorted-Matrix/2387.Median-of-a-Row-Wise-Sorted-Matrix.cpp b/Binary_Search/2387.Median-of-a-Row-Wise-Sorted-Matrix/2387.Median-of-a-Row-Wise-Sorted-Matrix.cpp new file mode 100644 index 000000000..211ed8d22 --- /dev/null +++ b/Binary_Search/2387.Median-of-a-Row-Wise-Sorted-Matrix/2387.Median-of-a-Row-Wise-Sorted-Matrix.cpp @@ -0,0 +1,25 @@ +class Solution { +public: + int matrixMedian(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + int k = (m*n+1)/2; + + int left = 0, right = INT_MAX; + while (left < right) + { + int mid = left+(right-left)/2; + int count = 0; + for (int i=0; i& nums) + { + int left = nums[0], right = 1e9; + while (left < right) + { + int mid = left+(right-left)/2; + + long long buff = 0; + int flag = true; + for (int i=0; i mid) + buff -= (x-mid); + else + buff += (mid-x); + if (buff < 0) + { + flag = false; + break; + } + } + + if (flag) + right = mid; + else + left = mid+1; + } + + return left; + + } +}; diff --git a/Binary_Search/2439.Minimize-Maximum-of-Array/2439.Minimize-Maximum-of-Array_v2.cpp b/Binary_Search/2439.Minimize-Maximum-of-Array/2439.Minimize-Maximum-of-Array_v2.cpp new file mode 100644 index 000000000..98e5ff552 --- /dev/null +++ b/Binary_Search/2439.Minimize-Maximum-of-Array/2439.Minimize-Maximum-of-Array_v2.cpp @@ -0,0 +1,14 @@ +class Solution { +public: + int minimizeArrayValue(vector& nums) + { + long long sum = 0; + long long ret = 0; + for (int i=0; inums[0],那么这意味着后面的元素有机会向nums[0]分摊一些数值,我们记做“缓冲值”:`buff = x-nums[0]`. + +接下来我们看nums[1]。如果`nums[1]>x`,那么不得不让它往前分摊数值,最多能够分摊多少呢?显然就是buff。于是如果`buff> nums[1]-x`,那么就OK,同时`buff-=nums[1]-x`;否则就直接返回失败。反之,如果`nums[1]= uniqueCnt1 && b >= uniqueCnt2`,是不是意味着构造了太多的元素了呢?并不是,因为a和b有部分是重合的,其中有c的部分是可以在A与B之间自由调剂的,但是只能出现在A或B中的一个。所以实际可以构造的总数是`a+b-c`,如果`a+b-c < uniqueCnt1+uniqueCnt2`的话,那说明n太小了。 + +以上就是三个判定n太小(可构造的数字不够)的判据。以此我们可以二分搜索得到最小的n,使得恰好以上三个判据都不满足。 diff --git a/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket/2517.Maximum-Tastiness-of-Candy-Basket.cpp b/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket/2517.Maximum-Tastiness-of-Candy-Basket.cpp new file mode 100644 index 000000000..d15ffb14c --- /dev/null +++ b/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket/2517.Maximum-Tastiness-of-Candy-Basket.cpp @@ -0,0 +1,40 @@ +class Solution { +public: + int maximumTastiness(vector& price, int k) + { + sort(price.begin(), price.end()); + + int left = 0, right = INT_MAX/2; + while (left < right) + { + int mid = right - (right-left)/2; + if (isOK(price, mid, k)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool isOK(vector& price, int diff, int k) + { + int count = 1; + for (int i=0; i= k) return true; + } + + return false; + + } +}; diff --git a/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket/Readme.md b/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket/Readme.md new file mode 100644 index 000000000..8a7db0d39 --- /dev/null +++ b/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket/Readme.md @@ -0,0 +1,5 @@ +### 2517.Maximum-Tastiness-of-Candy-Basket + +很明显我们会将price排序,此时“最小的间隔”必然是某两个连续选取的元素(不见得是相邻的)之差。此时,为了最大化这个间隔,我们需要在这个有序数组里跳跃着取数,但是又不能跨越得太大,否则无法凑齐k个元素。 + +对于这种题目,二分搜值是非常常见的手段。我们猜测一个跨度d,必然会从第一个元素开始选起(尽可能地拉低下限),找到下一个跨度与之恰好大于d的元素,再找一下,直至看是否能够选到k个元素。可以的话,就试图增加d,否则就减小d。 diff --git a/Binary_Search/2528.Maximize-the-Minimum-Powered-City/2528.Maximize-the-Minimum-Powered-City.cpp b/Binary_Search/2528.Maximize-the-Minimum-Powered-City/2528.Maximize-the-Minimum-Powered-City.cpp new file mode 100644 index 000000000..28e57289f --- 
/dev/null +++ b/Binary_Search/2528.Maximize-the-Minimum-Powered-City/2528.Maximize-the-Minimum-Powered-City.cpp @@ -0,0 +1,44 @@ +using LL = long long; +class Solution { +public: + long long maxPower(vector& stations, int r, int k) + { + LL left = 0, right = LLONG_MAX; + while (left < right) + { + LL mid = right-(right-left)/2; + if (isOK(stations, r, k, mid)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool isOK(vectorstations, int r, LL k, LL m) + { + int n = stations.size(); + LL sum = 0; + for (int i=0; i<=min(n-1, r-1); i++) + sum += stations[i]; + + for (int i=0; i= 0) + sum -= stations[i-r-1]; + + if (sum >= m) continue; + + LL diff = m - sum; + if (diff > k) + return false; + stations[min(n-1, i+r)] += diff; + sum = m; + k -= diff; + } + return true; + } +}; diff --git a/Binary_Search/2528.Maximize-the-Minimum-Powered-City/Readme.md b/Binary_Search/2528.Maximize-the-Minimum-Powered-City/Readme.md new file mode 100644 index 000000000..10bd5dee7 --- /dev/null +++ b/Binary_Search/2528.Maximize-the-Minimum-Powered-City/Readme.md @@ -0,0 +1,3 @@ +### 2528.Maximize-the-Minimum-Powered-City + +我们假设The minimum power of a city是m,那么意味着每个城市都可以得到至少m的供应。我们就顺着走一遍每个城市,看它是否已经实现了周围[i-r,i+r]范围内的滑窗和大于等于m。如果不够m,那么我们必然会贪心地增加最优端的那个城市的供给(因为它能覆盖更多未来的城市)补齐至m。一路走下去,如果总共需要补建的电力站小于等于k,那么就说明m是可以实现的,于是可以尝试猜测更大的m。如果需要补建的电力站大于k,那么就说明m不可行,就尝试猜测更小的m。 diff --git a/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II.cpp b/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II.cpp new file mode 100644 index 000000000..b210ebc83 --- /dev/null +++ b/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II.cpp @@ -0,0 +1,33 @@ +class Solution { + vectorpresum; +public: + int maxCount(vector& banned, int n, long long maxSum) + { + banned.erase(std::unique(banned.begin(), banned.end()),banned.end()); + sort(banned.begin(), banned.end()); + + presum.resize(banned.size()); + for (int i=0; i& banned, int n, long long maxSum) + { + int t = upper_bound(banned.begin(), banned.end(), m) - banned.begin(); + long long sum = (1+m)*m/2 - (t==0?0:presum[t-1]); + return sum > maxSum; + } +}; diff --git a/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II/Readme.md b/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II/Readme.md new file mode 100644 index 000000000..f5a716dcc --- /dev/null +++ b/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II/Readme.md @@ -0,0 +1,9 @@ +### 2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II + +我们猜测能够取到的最大的数值为m。只要确定了m,我们可以计算出[1,m]之间能够取到的元素的和sum是多少。如果sum大于maxSum,那么我们就猜测更小的m,否则就猜更大的数。 + +如何计算sum呢?首先就是计算1到m的等差数列之和。然后在排序后的banned里用upper_bound定位第一个大于m的位置。如果这个位置的index是t,那么就意味着banned里面有t个元素小于等于m,那么我们就需要将banned的前t个元素的和减去。 + +我们二分搜索求出m之后,还需要减去t,才是最后的答案。 + +注意,二分搜索的收敛值m有可能是banned中的元素,但是对于本题的解没有影响。因为本题的答案是最终取多少个元素。m终究是会被减去的。 diff --git a/Binary_Search/2560.House-Robber-IV/2560.House-Robber-IV.cpp b/Binary_Search/2560.House-Robber-IV/2560.House-Robber-IV.cpp new file mode 100644 index 000000000..045399b45 --- /dev/null +++ b/Binary_Search/2560.House-Robber-IV/2560.House-Robber-IV.cpp @@ -0,0 +1,43 @@ +class Solution { + int dp[100005][2]; +public: + int minCapability(vector& nums, int k) + { + int left = 0, right = INT_MAX/2; + while (left < right) + { + int mid = left+(right-left)/2; + if (atLeastK(mid, 
nums, k)) + right = mid; + else + left = mid+1; + } + return left; + } + + bool atLeastK(int cap, vector& nums, int k) + { + int n = nums.size(); + dp[0][0] = 0; + if (nums[0] <= cap) + dp[0][1] = 1; + else + dp[0][1] = INT_MIN/2; + + for (int i=1; i cap) + { + dp[i][0] = max(dp[i-1][0], dp[i-1][1]); + dp[i][1] = INT_MIN/2; + } + else + { + dp[i][0] = max(dp[i-1][0], dp[i-1][1]); + dp[i][1] = dp[i-1][0]+1; + } + } + + return max(dp[n-1][0], dp[n-1][1]) >= k; + } +}; diff --git a/Binary_Search/2560.House-Robber-IV/Readme.md b/Binary_Search/2560.House-Robber-IV/Readme.md new file mode 100644 index 000000000..f6c071d3e --- /dev/null +++ b/Binary_Search/2560.House-Robber-IV/Readme.md @@ -0,0 +1,17 @@ +### 2560.House-Robber-IV + +这道题的题意需要仔细理解。最小化capability意味着我们需要尽量挑数值小的house,但是如果我们挑的数值都太小的话,就没有足够的house来满足“at least k houses”的约束。于是我们就发现了单调性的变化:capability越小,那么可选的house就越少。capability越大,那么可选的house就越多。于是我们需要找恰好的capability,使得可选的house恰好大于等于k。因此,这是一个二分搜值的算法。 + +我们猜测需要的capability是c,那么可以选多少house呢?满足两个条件:挑选的house的数值不能大于c,挑选的house不能相邻。对于后者,我们知道house robber的一个通用技巧,就是对每一个house都讨论取还是不取两种策略。所以我们令`dp[i][0]`表示第i个house不抢的策略下所能选中的house数目,`dp[i][1]`表示第i个house抢的策略下所能选中的house数目。 + +考虑第i个房子,如果`house[i]>c`,我们终归是不能抢的,故第i-1个house抢不抢无所谓。 +```cpp +dp[i][0] = max(dp[i-1][0], dp[i-1][1]); +dp[i][1] = INT_MIN/2; +``` +考虑第i个房子,如果`house[i]<=c`,我们可以选择抢,也可以选择不抢 +```cpp +dp[i][0] = max(dp[i-1][0], dp[i-1][1]); +dp[i][1] = dp[i-1][0]+1; +``` +由此将所有的dp[i][x]都更新。最后考察dp[n-1][x]能否大于k,即意味着在当前c的设置下,能否实现至少抢k个房子。 diff --git a/Binary_Search/2563.Count-the-Number-of-Fair-Pairs/2563.Count-the-Number-of-Fair-Pairs.cpp b/Binary_Search/2563.Count-the-Number-of-Fair-Pairs/2563.Count-the-Number-of-Fair-Pairs.cpp new file mode 100644 index 000000000..7abfb660a --- /dev/null +++ b/Binary_Search/2563.Count-the-Number-of-Fair-Pairs/2563.Count-the-Number-of-Fair-Pairs.cpp @@ -0,0 +1,23 @@ +class Solution { +public: + long long countFairPairs(vector& nums, int lower, int upper) + { + sort(nums.begin(), nums.end()); + + long long ret = 0; + for (int x: nums) + { + int k = upper_bound(nums.begin(), nums.end(), upper-x) - nums.begin(); + int t = lower_bound(nums.begin(), nums.end(), lower-x) - nums.begin(); + int count = k-t; + + if (x+x<=upper && x+x>=lower) + count--; + + ret += count; + } + + return ret/2; + + } +}; diff --git a/Binary_Search/2563.Count-the-Number-of-Fair-Pairs/Readme.md b/Binary_Search/2563.Count-the-Number-of-Fair-Pairs/Readme.md new file mode 100644 index 000000000..c6c676cf2 --- /dev/null +++ b/Binary_Search/2563.Count-the-Number-of-Fair-Pairs/Readme.md @@ -0,0 +1,7 @@ +### 2563.Count-the-Number-of-Fair-Pairs + +初看这道题的时候,一个比较自然的想法是,遍历每个j,查看j之前的有多少个符合条件的元素满足`lower-nums[j] <= nums[i] <= upper-nums[j]`。这就要求j之前的元素必须是有序的。但是每考察完一个j,就需要将nums[0:j]重新排序,这样的复杂度太高。 + +这时候我们再换一个角度。这里i与j其实是对称的。我们没有必要纠结于`ij`,只要除以二就可以。所以我们只需要将nums排一次序。对于其中任意的元素x,用二分法求出有多少个符合条件的元素满足在`[lower-x, pper-x]`的区间范围内即可。 + +但是这里有个关键点,就是上述方法中,x本身可能就恰好在`[lower-x, pper-x]`的范围内。这样我们会误加上(x,x)这样的组合。所以我们要验证一下,如果(x,x)满足条件,就额外从计数器里减一。最终总的计数除以二,就是合法的`i& ranks, int cars) + { + LL left = 0, right = LLONG_MAX; + while (left < right) + { + LL mid = left + (right-left)/2; + if (isOK(mid, ranks, cars)) + right = mid; + else + left = mid+1; + } + return left; + } + + bool isOK(LL t, vector& ranks, int cars) + { + LL count = 0; + for (int r : ranks) + { + count += sqrt(t/r); + if (count >= cars) + return true; + } + return false; + + } +}; diff --git a/Binary_Search/2594.Minimum-Time-to-Repair-Cars/Readme.md b/Binary_Search/2594.Minimum-Time-to-Repair-Cars/Readme.md new file mode 100644 index 
000000000..72ae59ab1 --- /dev/null +++ b/Binary_Search/2594.Minimum-Time-to-Repair-Cars/Readme.md @@ -0,0 +1,5 @@ +### 2594.Minimum-Time-to-Repair-Cars + +最基本的二分搜值。猜测一个时间t,看看在这个时间内所有人修车数目的总和是否大于等于cars。是的话试图减小t,否则的话试图增加t,直至收敛。 + +对于给定的t,每个人的修车数量就是`sqrt(t/r)`. diff --git a/Binary_Search/2604.Minimum-Time-to-Eat-All-Grains/2604.Minimum-Time-to-Eat-All-Grains.cpp b/Binary_Search/2604.Minimum-Time-to-Eat-All-Grains/2604.Minimum-Time-to-Eat-All-Grains.cpp new file mode 100644 index 000000000..5e1cfd2bf --- /dev/null +++ b/Binary_Search/2604.Minimum-Time-to-Eat-All-Grains/2604.Minimum-Time-to-Eat-All-Grains.cpp @@ -0,0 +1,55 @@ +class Solution { +public: + int minimumTime(vector& hens, vector& grains) + { + sort(hens.begin(), hens.end()); + sort(grains.begin(), grains.end()); + + int left = 0, right = INT_MAX; + while (left < right) + { + int mid = left + (right-left)/2; + if (isOK(mid, hens, grains)) + right = mid; + else + left = mid+1; + } + return left; + } + + bool isOK(int time, vector& hens, vector& grains) + { + int j = 0; + for (int i=0; iy,这不可能是最优解。 + +有了这个发现之后,接下来似乎还是无从下手,那就不妨二分搜值。显然我们会设定一个时间T,看看所有的母鸡能在此时间内把所有谷子都吃完。或者说,是否存在一种谷子区间的分配,能够在T里被各个母鸡吃到。如果可行,那么尝试降低T,否则我们就提高T,最终收敛到最优解。 + +现在考察这个判定函数。因为每个谷子都要被吃,显然我们就从第0号谷子开始考察:它必然是被第0号母鸡吃掉。假设0号谷子在0号母鸡左边,如果两者离得太远(超过了T),那么整体就返回无解。如果在范围内,那么意味着0号母鸡在移动到0号谷子的过程中遇到的所有谷子都能被吃掉。我们记0号母鸡移动到0号谷子的时间是t,那么母鸡在吃完0号谷子还需要返回再花时间t,此时如果还有剩余T-2t,那么就可以往右走,再多吃一点谷子,注意这一段是单程。由此我们可以确定0号母鸡吃的谷子的总数目,假设是j,那么下一个回合我们就考察第j个谷子和第1号母鸡之间的关系,再考察1号母鸡总共能吃几粒谷子,重复这个逻辑。 + +但是注意,在上面的0号母鸡策略中,其实还有另一种方案,就是先往右走,再折返,再往左边走t的时间保证吃掉0号谷子。这也是可行的。哪种方案更好呢?取决于0号母鸡往右边开拓的范围哪个更远。假设方案1比方案2更好,意味着 +``` +T - t*2 > (T-t) / 2 <=> T > 3*t +``` +也就是说,如果`T>3t`,我们就选取方案1,否则就选取方案2. + +由此我们顺次遍历谷子,将一个区间范围内的谷子归入下一个母鸡,在T的约束下确定这个区间范围。直至看能否把所有的谷子都分配完毕。 diff --git a/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs/2616.Minimize-the-Maximum-Difference-of-Pairs.cpp b/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs/2616.Minimize-the-Maximum-Difference-of-Pairs.cpp new file mode 100644 index 000000000..17fb3e7ba --- /dev/null +++ b/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs/2616.Minimize-the-Maximum-Difference-of-Pairs.cpp @@ -0,0 +1,32 @@ +class Solution { +public: + int minimizeMax(vector& nums, int p) + { + sort(nums.begin(), nums.end()); + int left = 0, right = INT_MAX; + while (left < right) + { + int mid = left + (right-left)/2; + if (isOK(nums, p, mid)) + right = mid; + else + left = mid+1; + } + return left; + } + + bool isOK(vector& nums, int p, int diff) + { + int n = nums.size(); + int count = 0; + for (int i=0; i= p); + } +}; diff --git a/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs/Readme.md b/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs/Readme.md new file mode 100644 index 000000000..34fc49dd2 --- /dev/null +++ b/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs/Readme.md @@ -0,0 +1,7 @@ +### 2616.Minimize-the-Maximum-Difference-of-Pairs + +我们首先容易想到的是将数组排序,这样我们选择的pairs必然都是相邻的元素。任何跳跃选择的pair都必然不会是最优解。接下来我们该如何选择这些pairs呢?此时陷入了困难。我们并不能贪心地找相邻最短的pair,比如这个例子:`1 3 4 6`,我们优先取{3,4}之后,剩下的{1,6}的差距更大了。 + +在正面突破没有思路的时候,不妨试一试反向的“猜答案”。二分搜值在这里恰好是适用的。假设最大间距是x,那么当x越大时,我们就越容易找p对符合条件的pairs(比如当x是无穷大时,pairs可以随意挑);反之当x越小时,就越不容易找到p对符合条件的pairs。以此不断调整x的大小,直至收敛。 + 
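+A minimal sketch (added here, not part of the original write-up) of the outer "binary search on the answer" loop just described, assuming the usual LeetCode headers; `isOK(nums, p, diff)` is the greedy feasibility check that the next paragraph justifies and that the accompanying solution file implements.
+```cpp
+// Greedy check: on the sorted array, pair adjacent elements whose gap <= diff.
+bool isOK(vector<int>& nums, int p, int diff) {
+    int count = 0;
+    for (int i = 0; i + 1 < (int)nums.size(); i++)
+        if (nums[i+1] - nums[i] <= diff) { count++; i++; }  // take the pair, skip both elements
+    return count >= p;
+}
+// Shrink the guessed maximum in-pair difference x while the check still succeeds.
+int minimizeMax(vector<int>& nums, int p) {
+    sort(nums.begin(), nums.end());
+    int left = 0, right = nums.back() - nums.front();       // the answer lies in [0, max - min]
+    while (left < right) {
+        int mid = left + (right - left) / 2;
+        if (isOK(nums, p, mid)) right = mid;                 // feasible: try a smaller x
+        else left = mid + 1;                                 // infeasible: x must grow
+    }
+    return left;
+}
+```
+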
+于是接下来我们就考虑,假设最大间距是x,那么我们如何判定能否找到p对符合条件的pairs呢?为了尽量找到多的pairs,我们必然从小到大把这些元素都看一遍,尽量不浪费。假设最小的四个元素分别是abcd,并且他们彼此之间的间距都小于x,那么我们是否应该取a和b呢?如果取的话,那么可能带来的顾虑就是b就失去了和c配对的机会。不过这个顾虑是不必要的:如果我们选择了b和c,那么同样构造了一对,但a就白白浪费了。即使你可以将a与d配对且间距也小于x,那么也违背了我们之前的直觉,“我们永远只会取相邻的元素配对”。事实上(a,b)(c,d)的方案肯定是优于(b,c)(a,d)的。所以我们的结论就是,如果最小元素和它相邻元素的间距小于x,那么就贪心地配对;否则最小元素只能放弃。依次类推从小到大处理每一个元素,就可以知道我们最多能搞出多少个配对。 diff --git a/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive/2702.Minimum-Operations-to-Make-Numbers-Non-positive.cpp b/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive/2702.Minimum-Operations-to-Make-Numbers-Non-positive.cpp new file mode 100644 index 000000000..6a530d58b --- /dev/null +++ b/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive/2702.Minimum-Operations-to-Make-Numbers-Non-positive.cpp @@ -0,0 +1,29 @@ +class Solution { +public: + int minOperations(vector& nums, int x, int y) + { + sort(nums.rbegin(), nums.rend()); + int left = 0, right = INT_MAX/2; + while (left < right) + { + int mid = left+(right-left)/2; + if (isOK(mid, nums, x, y)) + right = mid; + else + left = mid+1; + } + return left; + } + + bool isOK(int k, vector& nums, int x, int y) + { + int count = 0; + for (int i=0; i k) return false; + } + return true; + } +}; diff --git a/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive/Readme.md b/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive/Readme.md new file mode 100644 index 000000000..79d01f3e9 --- /dev/null +++ b/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive/Readme.md @@ -0,0 +1,5 @@ +### 2702.Minimum-Operations-to-Make-Numbers-Non-positive + +此题很容易知道贪心的策略,肯定是将当前数组里最大的元素减去x,其他元素减去y。然后不断重复处理。但问题是如此暴力的模拟,在时间复杂度上无法接受。 + +此时二分搜值的想法就比较容易。我们尝试判定m次操作是否能将所有元素都降到0以下。关键之处在于我们可以将每次操作拆分为:将全部元素减去y,再挑一个元素减去x-y。那么m次操作必然是将所有元素都减掉了m个y,此外我们还有m次操作将剩余没有变成0的元素减去x-y。我们只要贪心的查看这些操作是否够将所有元素变成0即可。 diff --git a/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates/2819.Minimum-Relative-Loss-After-Buying-Chocolates.cpp b/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates/2819.Minimum-Relative-Loss-After-Buying-Chocolates.cpp new file mode 100644 index 000000000..52195fcfa --- /dev/null +++ b/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates/2819.Minimum-Relative-Loss-After-Buying-Chocolates.cpp @@ -0,0 +1,46 @@ +using LL = long long; +class Solution { + LL presum[100005]; +public: + vector minimumRelativeLosses(vector& prices, vector>& queries) + { + int n = prices.size(); + sort(prices.begin(), prices.end()); + + presum[0] = prices[0]; + for (int i=1; irets; + for (auto& arr: queries) + { + LL k = arr[0], m = arr[1]; + int left = 0, right = m; + while (left < right) + { + int mid = right - (right-left)/2; + if (mid==0 || mid==m) break; + if (prices[mid-1] < 2*k - prices[n-(m-mid)]) + left = mid; + else + right = mid-1; + } + int p = left; + LL ans1 = rangeSum(0, p-1) + 2*k*(m-p) - rangeSum(n-(m-p), n-1); + p++; + LL ans2 = rangeSum(0, p-1) + 2*k*(m-p) - rangeSum(n-(m-p), n-1); + rets.push_back(min(ans1, ans2)); + } + + return rets; + } + + LL rangeSum(int a, int b) + { + if (a>b) return 0LL; + if (a==0) + return presum[b]; + else + return presum[b]-presum[a-1]; + } +}; diff --git a/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates/Readme.md b/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates/Readme.md new file mode 100644 index 000000000..0ff68f4ea --- /dev/null +++ b/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates/Readme.md @@ -0,0 
+1,5 @@ +### 2819.Minimum-Relative-Loss-After-Buying-Chocolates + +首先可以得出大致的策略,对于bob而言,要么选价格最便宜的(当价格pk时,代价函数是2k-p). 所以,选择的m件商品,必然在价格轴上一部分选在最左边,另一部分选在最右边。 + +假设我们选t件最便宜的,剩下m-t件是最贵的,那么该如何确定t的个数呢?我们希望选择商品的代价尽量远离峰值(p=k处),所以希望`price[t-1]`与`2k-price[n-(m-t)]`数值上尽量接近。否则因为t对两者影响的此消彼长,一方变低的话,另一方必然更高。所以我们尝试寻找最大的T,使得恰好`price[T-1] < 2k-price[n-(m-T)]`. 接下来,我们尝试T和T+1两个候选值,寻找两者之中能使总代价最优的解。总代价就是t件最便宜的代价`prices[0:t-1]`加上m-t件最贵的代价`2k*(m-t) - prices[n-(m-t): n-1]`. diff --git a/Binary_Search/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game.cpp b/Binary_Search/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game.cpp new file mode 100644 index 000000000..bd15bd280 --- /dev/null +++ b/Binary_Search/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game.cpp @@ -0,0 +1,46 @@ +using LL = long long; +class Solution { +public: + long long getMaxFunctionValue(vector& receiver, long long k) + { + int n = receiver.size(); + int M = ceil(log(k)/log(2)); + vector>dp(n+1, vector(M+1)); + vector>pos(n+1, vector(M+1)); + + for (int i=0; ibits; + for (int i=0; i<=M; i++) + { + if ((k>>i)&1) + bits.push_back(i); + } + + LL ret = 0; + for (int i=0; i> next[10005]; + int count[10005][27]; + int parent[10005]; + int level[10005]; +public: + vector minOperationsQueries(int n, vector>& edges, vector>& queries) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1], c = edge[2]; + next[a].push_back({b,c}); + next[b].push_back({a,c}); + } + + vectortemp(27); + dfs(0, 0, -1, temp); + parent[0] = -1; + + vectorrets; + for (auto query: queries) + { + int a = query[0], b = query[1]; + int lca = getLCA(0,a,b); + + vectortemp(27); + for (int i=1; i<=26; i++) + { + temp[i] += count[a][i]; + temp[i] += count[b][i]; + temp[i] -= 2*count[lca][i]; + } + + int sum = 0; + int mx = 0; + for (int i=1; i<=26; i++) + { + sum += temp[i]; + mx = max(mx, temp[i]); + } + + rets.push_back(sum - mx); + } + + return rets; + } + + void dfs(int cur, int l, int p, vector&temp) + { + for (auto& child: next[cur]) + { + if (child.first==p) continue; + int w = child.second; + + temp[w]+=1; + for (int i=1; i<=26; i++) + count[child.first][i] = temp[i]; + + parent[child.first] = cur; + level[child.first] = l+1; + + dfs(child.first, l+1, cur, temp); + temp[w]-=1; + } + } + + int getLCA(int node, int p, int q) + { + while (1) + { + if (level[p]>level[q]) + { + p = parent[p]; + } + else if (level[p]> next[10005]; + int count[10005][27]; + int parent[10005]; + int level[10005]; + int ancestor[10005][18]; +public: + vector minOperationsQueries(int n, vector>& edges, vector>& queries) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1], c = edge[2]; + next[a].push_back({b,c}); + next[b].push_back({a,c}); + } + + vectortemp(27); + dfs(0, 0, -1, temp); + parent[0] = -1; + + for (int i=0; irets; + for (auto query: queries) + { + int a = query[0], b = query[1]; + // int lca = getLCA(0,a,b); + int lca = getLCA(a,b); + + vectortemp(27); + for (int i=1; i<=26; i++) + { + temp[i] += count[a][i]; + temp[i] += count[b][i]; + temp[i] -= 2*count[lca][i]; + } + + int sum = 0; + int mx = 0; + for (int i=1; i<=26; i++) + { + sum += temp[i]; + mx = max(mx, temp[i]); + } + + rets.push_back(sum - mx); + } + + return rets; + } + + void dfs(int cur, int l, int p, vector&temp) + { + for (auto& child: next[cur]) + { + if (child.first==p) continue; + int w = child.second; + + temp[w]+=1; + for (int 
i=1; i<=26; i++) + count[child.first][i] = temp[i]; + + parent[child.first] = cur; + level[child.first] = l+1; + + dfs(child.first, l+1, cur, temp); + temp[w]-=1; + } + } + + int getKthAncestor(int i, int k) + { + int cur = i; + for (int j=0; j<=17; j++) + { + if ((k>>j)&1) + cur = ancestor[cur][j]; + } + return cur; + } + + int getLCA(int a, int b) + { + while (level[a]!=level[b]) + { + if (level[a]q路径上的每种边权数目,即`count[p][j]+count[q][j]-2*count[lca][j]`,我们遍历一下j,就可以知道路径长度以及出现最多次的边权个数,两者之差就是query的答案。 + +那么如何求lca呢?我们需要在DFS的过程中,顺便知道每个节点的深度level[i]以及它的父节点parent[i].这样,我们先将p,q两点中较深的那个上溯到与另一个相同的深度,然后两者再一层一层共同向上追溯直至它们汇合,这个节点就是它们的LCA。这理论上是o(N)的算法。 + +有一个log(N)的LCA算法,就是利用binary lifting. 我们先利用parent的信息,预先计算出ancestor[i][j],表示节点i向上数第2^j层的祖先。这样我们就可以写出时间复杂度是log(n)的getKthAncestor的函数。对于任意的p与q,我们先计算出它们的深度差,用getKthAncestor将较深的那个节点拉至与另一个节点相同。然后用二分搜值,寻找最小的k,使得p与q的getKthAncestor相同,那么这个相同的节点就是它们的LCA。总的时间复杂度仍然是log(n). diff --git a/Binary_Search/2861.Maximum-Number-of-Alloys/2861.Maximum-Number-of-Alloys.cpp b/Binary_Search/2861.Maximum-Number-of-Alloys/2861.Maximum-Number-of-Alloys.cpp new file mode 100644 index 000000000..127a3378d --- /dev/null +++ b/Binary_Search/2861.Maximum-Number-of-Alloys/2861.Maximum-Number-of-Alloys.cpp @@ -0,0 +1,35 @@ +using LL = long long; +class Solution { +public: + int maxNumberOfAlloys(int n, int k, int budget, vector>& composition, vector& stock, vector& cost) + { + int ret = 0; + for (auto& comp : composition) + { + int left = 0, right = INT_MAX/2; + while (left < right) + { + int mid = right-(right-left)/2; + if (isOK(mid, n, budget, comp, stock, cost)) + left = mid; + else + right = mid-1; + } + ret = max(ret, left); + } + + return ret; + } + + bool isOK(int t, int n, int budget, vector&comp, vector& stock, vector& cost) + { + LL total = 0; + for (int i=0; i budget) + return false; + } + return true; + } +}; diff --git a/Binary_Search/2861.Maximum-Number-of-Alloys/Readme.md b/Binary_Search/2861.Maximum-Number-of-Alloys/Readme.md new file mode 100644 index 000000000..1ff0524db --- /dev/null +++ b/Binary_Search/2861.Maximum-Number-of-Alloys/Readme.md @@ -0,0 +1,3 @@ +### 2861.Maximum-Number-of-Alloys + +注意:All alloys must be created with the same machine. 对于每个machine,我们用二分搜值来确定在不超过budget的约束下、最多能生产alloy的个数。最后对所有机器取最大值。 diff --git a/Binary_Search/2972.Count-the-Number-of-Incremovable-Subarrays-II/2972.Count-the-Number-of-Incremovable-Subarrays-II.cpp b/Binary_Search/2972.Count-the-Number-of-Incremovable-Subarrays-II/2972.Count-the-Number-of-Incremovable-Subarrays-II.cpp new file mode 100644 index 000000000..6d04a7d92 --- /dev/null +++ b/Binary_Search/2972.Count-the-Number-of-Incremovable-Subarrays-II/2972.Count-the-Number-of-Incremovable-Subarrays-II.cpp @@ -0,0 +1,36 @@ +using LL = long long; +class Solution { +public: + long long incremovableSubarrayCount(vector& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), INT_MIN); + nums.push_back(INT_MAX); + + int l = 1; + while (l<=n) + { + if (nums[l]0) + { + if (nums[r]>nums[r-1]) r--; + else break; + } + if (r0) + { + if (nums[r]>nums[r-1]) r--; + else break; + } +``` +对于[1:L]里的每一个位置i,我们可以在[R,n]用二分找到恰好大于nums[i]的位置j。那么符合条件的后段的左边界可以是j,j+1,...,n,总共有n+1-j个。 + +但是以上的思考意识强制要求前段、中段、后段都不能为空,而忽略了对应的三种情况:前段为空,中段为空、或者后段为空。需要额外考虑。 + +1. 如果中段为空,那么说明nums整体都是递增的,直接返回nums里的子区间的数目:n(n-1)/2+n. +2. 
如果前端为空,或者后端为空,我们可以有一个巧妙的处理,使得之前的逻辑依然适用。在nums前添加一个无穷小(index是0),后面添加一个无穷大(index是n+1)。这样,i的遍历可以从0开始(意味着其实左段为空);而在[R:n+1]区间里进行二分的过程中可能会找到n+1这个位置(即认为后段的左边界是n+1),这其实就意味着允许了后段为空。 diff --git a/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I/3048.Earliest-Second-to-Mark-Indices-I.cpp b/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I/3048.Earliest-Second-to-Mark-Indices-I.cpp new file mode 100644 index 000000000..7349d072e --- /dev/null +++ b/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I/3048.Earliest-Second-to-Mark-Indices-I.cpp @@ -0,0 +1,53 @@ +class Solution { + int n,m; +public: + int earliestSecondToMarkIndices(vector& nums, vector& changeIndices) + { + n = nums.size(); + m = changeIndices.size(); + nums.insert(nums.begin(), 0); + changeIndices.insert(changeIndices.begin(), 0); + + int left=1, right=m; + while (left < right) + { + int mid = left + (right-left)/2; + + if (isOK(mid, nums, changeIndices)) + right = mid; + else + left = mid+1; + } + + if (!isOK(left, nums, changeIndices)) return -1; + else return left; + } + + bool isOK(int m, vector& nums, vector& changeIndices) + { + vectorlast(n+1); + for (int i=1; i<=m; i++) + last[changeIndices[i]]=i; + + for (int i=1; i<=n; i++) + if (last[i]==0) return false; + + int count = 0; + for (int i=1; i<=m; i++) + { + int idx = changeIndices[i]; + + if (i!=last[idx]) + { + count++; + } + else + { + count -= nums[idx]; + if (count < 0) return false; + } + } + + return true; + } +}; diff --git a/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I/Readme.md b/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I/Readme.md new file mode 100644 index 000000000..7a08e8eb3 --- /dev/null +++ b/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I/Readme.md @@ -0,0 +1,7 @@ +### 3048.Earliest-Second-to-Mark-Indices-I + +首先要看出这道题存在单调性,肯定时间越多的话,越有机会将所有元素都清零并标记。由此我们考虑尝试二分搜值。二分法的优点是,不用直接求“最优解”,而是转化为判定“可行解”,难度上会小很多。 + +本题里,我们需要在给定时间s的情况下,问能否将所有元素都清零并标记。对于一个给定的index而言,我们必须在对其“标记”前完成清零,因此我们肯定会将“标记”操作尽量延后,方便腾出更多时间做减一的操作。显然,我们会贪心地将index最后一次出现的时刻做“标记”操作;而如果index出现了不止一次多次,那么除了在最后一次的时刻做“标记”外,其余的时刻都会留作做“减一”操作(但不一定针对nums[idx])。为了顺利能够在最后一次index出现的时候做标记,我们需要保证之前积累的“减一”操作足够多,能够大于等于nums[idx]即可。于是我们只要顺着index出现的顺序,模拟上述的操作:要么积累“减一”操作count(如果不是最后一个出现),要么进行“标记”操作(如果是最后一次操作)。对于后者,能进行“标记”操作的前提是已经对那个index的数进行多次减一至零,故要求`count>=nums[idx]`. 
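+
+A condensed sketch of this feasibility check (the helper name `canFinish` is mine; the arrays are 1-indexed after the dummy front elements, exactly as in the accompanying solution file):
+```cpp
+bool canFinish(int s, vector<int>& nums, vector<int>& changeIndices) {
+    int n = nums.size() - 1;                        // nums[1..n]
+    vector<int> last(n + 1, 0);
+    for (int i = 1; i <= s; i++) last[changeIndices[i]] = i;
+    for (int i = 1; i <= n; i++)
+        if (last[i] == 0) return false;             // some index never gets a second to be marked
+    long long budget = 0;                           // seconds banked for "decrement" operations
+    for (int i = 1; i <= s; i++) {
+        int idx = changeIndices[i];
+        if (i != last[idx]) budget++;               // not the last occurrence: bank this second
+        else {
+            budget -= nums[idx];                    // marking requires nums[idx] prior decrements
+            if (budget < 0) return false;
+        }
+    }
+    return true;
+}
+```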
+ +如果能够顺利地走完changeIndices[1:s]、并且将所有的nums都完成“标记”的话,就说明s秒能够实现目标。 diff --git a/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II/3049.Earliest-Second-to-Mark-Indices-II.cpp b/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II/3049.Earliest-Second-to-Mark-Indices-II.cpp new file mode 100644 index 000000000..f13c3cacc --- /dev/null +++ b/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II/3049.Earliest-Second-to-Mark-Indices-II.cpp @@ -0,0 +1,67 @@ +using LL = long long; +class Solution { + int n,m; +public: + int earliestSecondToMarkIndices(vector& nums, vector& changeIndices) + { + n = nums.size(); + m = changeIndices.size(); + + nums.insert(nums.begin(), 0); + changeIndices.insert(changeIndices.begin(), 0); + + int left=1, right=m; + while (left < right) + { + int mid = left + (right-left)/2; + + if (isOK(mid, nums, changeIndices)) + right = mid; + else + left = mid+1; + } + + if (!isOK(left, nums, changeIndices)) return -1; + else return left; + } + + bool isOK(int t, vector&nums, vector changeIndices) + { + if (tfirst(n+1, 0); + for (int i=1; i<=t; i++) + { + if (first[changeIndices[i]]==0 && nums[changeIndices[i]]!=0) + first[changeIndices[i]]=i; + else + changeIndices[i] = 0; + } + + LL total = accumulate(nums.begin(), nums.end(), 0ll); + + multisetresets; + for (int i=t; i>=1; i--) + { + int idx = changeIndices[i]; + + if (idx == 0) continue; + + int marks = (t-i+1) - (resets.size() + 1); + if (resets.size() +1 > marks) + { + resets.insert(nums[idx]); + resets.erase(resets.begin()); + } + else + { + resets.insert(nums[idx]); + } + } + + LL total_clear = 0; + for (int x: resets) total_clear+=x; + + return total_clear + (t-n-resets.size()) >= total; + } +}; diff --git a/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II/Readme.md b/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II/Readme.md new file mode 100644 index 000000000..a29d5808e --- /dev/null +++ b/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II/Readme.md @@ -0,0 +1,11 @@ +### 3049.Earliest-Second-to-Mark-Indices-II + +首先,容易看出此题的答案具有单调性。时间越长,就越容易有清零的机会,也就越容易实现目标。所以我们在最外层套用二分搜值的框架,将求“最优解”的问题,转化为判定“可行解”的问题。 + +假设给出T秒的时刻,如何判定是否可行呢?我们发现,“清零”操作的性价比是非常高的,如果对于某个index有机会做“清零”操作,我们必然这样做(除非nums[idx]本身就是零)。如果对于某个index,它在时间序列里出现了多次,我们会在哪个时候去清零呢?相对而言我们尽早清零是最优的选择,因为如果你晚些时候去做清零操作,可能存在一个风险:后续没有足够的机会取做“标记”操作了。由此,我们可以在时间序列changeIndices里面,预处理得到哪些时刻我们是在做“清零”。 + +至此,我们知道哪些时候做“清零”,其余的时候基本首选就是做“减一”,而唯一的制约因素就是要在最后留有足够“标记”的机会。当然并不是无脑的选最后n秒都做“标记”,因为有些“清零”可能在很靠后的时刻才会发生。怎么制定策略呢?这就需要从后往前去安排。 + +我们维护一个multiset叫做resets,里面存放那些确定要进行清零操作的nums的数值。当我们从后往前遍历的时候,判定时刻i是否能够进行“清零”,有以下两个条件:1.时刻i本身正是之前已经“计划”进行清零的时刻。2.假设i时刻进行清零的话,要保证在剩余的[i:t]的时间里,进行清零的数量(即multiset里面的元素个数)要小于剩余时间的一半,这样才能保证这些被清零的元素有机会被“标记”。如果不满足条件怎么办呢,这意味着我们不能增加这个清零名额,但是可以“调换”一个清零名额,将resets里面腾出一个来转给当前的index做清零。我们这样做有什么好处呢?这是因为也许可以减少“减一”操作的次数。假设当前时刻的元素如果做清零的话效果是-5,而resets里面有一个元素做清零其效果是-3,那么显然我们需要将resets里面做清零的元素拿出来留给当前元素。这叫做“反悔贪心”。 + +最终从后往前走完一遍之后,我们就知道哪些元素是真正需要被实施“清零”的。刨去这些之外,最后的n个时刻显然就是做“标记”操作的。这个安排保证了所有被清零的元素都能够得到“标记”。剩下的时刻就是应该做“减一”操作:我们必须要求所有“减一”操作的效果,加上“清零”操作的效果,最终一定是要大于nums里元素的总和。满足这些之后,才能判定目标在时间t内可行。 diff --git a/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II/3097.Shortest-Subarray-With-OR-at-Least-K-II.cpp b/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II/3097.Shortest-Subarray-With-OR-at-Least-K-II.cpp new file mode 100644 index 000000000..8df5624a5 --- /dev/null +++ b/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II/3097.Shortest-Subarray-With-OR-at-Least-K-II.cpp @@ -0,0 +1,45 @@ +class Solution { +public: + int minimumSubarrayLength(vector& 
nums, int k) + { + int n = nums.size(); + int left = 1, right = n; + while (left < right) + { + int mid = left + (right-left)/2; + if (isOK(nums, k, mid)) + right = mid; + else + left = mid+1; + } + if (!isOK(nums, k, left)) return -1; + else return left; + } + + bool isOK(vector&nums, int k, int len) + { + vectorcount(31); + for (int i=0; i>j)&1); + } + + for (int i=len-1; i>j)&1); + + int sum = 0; + for (int j=0; j<31; j++) + if (count[j]>0) sum += (1<= k) return true; + + for (int j=0; j<31; j++) + count[j] -= ((nums[i-len+1]>>j)&1); + } + + return false; + } +}; diff --git a/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II/Readme.md b/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II/Readme.md new file mode 100644 index 000000000..c9088556d --- /dev/null +++ b/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II/Readme.md @@ -0,0 +1,5 @@ +### 3097.Shortest-Subarray-With-OR-at-Least-K-II + +对于bitwise OR的操作,最大的特点是,OR的对象越多,答案越大。于是本题的答案显然具有单调性,越长的subarray越容易得到超过K的结果。所以我们只需要二分搜索长度即可。 + +于是本题就转化成了,对于一个固定长度L,判断是否存在这样长度的滑窗,使得里面元素的bitwise OR的结果大于等于K。我们只需要在滑窗移动的过程中,记录每个bit位上出现过多少次1即可。只要存在至少一个1,那么bitwise OR的结果在该位上就是1. diff --git a/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II/3399.Smallest-Substring-With-Identical-Characters-II.cpp b/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II/3399.Smallest-Substring-With-Identical-Characters-II.cpp new file mode 100644 index 000000000..5a0c7928d --- /dev/null +++ b/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II/3399.Smallest-Substring-With-Identical-Characters-II.cpp @@ -0,0 +1,61 @@ +class Solution { +public: + int minLength(string s, int numOps) + { + vectorarr; + vectornums; + for (auto ch: s) nums.push_back(ch-'0'); + + int n = s.size(); + for (int i=0; iarr, vector&nums, int len, int numOps) + { + if (len==1) + { + int count = 0; + for (int i=0; inumOps) + return false; + } + return true; + } +}; diff --git a/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II/Readme.md b/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II/Readme.md new file mode 100644 index 000000000..b32e12466 --- /dev/null +++ b/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II/Readme.md @@ -0,0 +1,18 @@ +### 3399.Smallest-Substring-With-Identical-Characters-II + +容易发现,只要操作次数越多,就越容易将最长的identical-chracter sbustring长度降下来,所以很明显适合二分搜值的框架。 + +于是问题转变成给定一个len,问是否能在numOps次flip操作内,使得s里不存在超过长度len的identical substring。 + +我们将原字符串里进行预处理,分割为一系列由相同字符组成的子串。对于任意一段长度为x的子串,我们至少需要做多少次flip呢?很明显,贪心思想就可以得到最优解,即每隔len个字符,我们就做一次flip。假设最少做t次flip,我们需要满足 +``` +x <= (len+1) * t + len +``` +才能保证x里面不会有超过长度为len的identical substring。于是求得`t>=(x-k)/(k+1)`。不等式右边是小数时,取上界整数。 + +但是此题有一个坑。如果len是1的话,那么当x是偶数时,会给下一段x带来困扰。比如说s=00001111,我们在处理第一段0000时,会得到贪心的做法做两次flip使得其变成0101。我们发现这样的话下一段的1111其实需要处理的长度是5,给算法带来了极大的不便。比较简单的处理方法就是对len=1的情况特别处理,跳出之前的思维模式,只要考虑将s强制转换为01相间的字符串,计算需要做的flip即可。 + +那为什么len是2的时候我们就不用担心呢?距离s=000111,如果按照贪心的做法,对于第一段000我们需要做一次flip使得其变成001,似乎依然会影响到下一段的111. 
但事实上,对于一段我们不需要变换成001,可以将最右端的1随意往左调动一下,变换成010。结果就是flip的次数不变的前提下,依然可以保证不会出现长度超过2的identical substring。注意,回顾一下二分搜值的框架,我们不要求一定要构造出长度为2的identical substring。 + +同理当len=3时,也不会出现类似影响下一段的困扰。之前的计算t的表达式依然可以适用。 + diff --git a/Binary_Search/3449.Maximize-the-Minimum-Game-Score/3449.Maximize-the-Minimum-Game-Score.cpp b/Binary_Search/3449.Maximize-the-Minimum-Game-Score/3449.Maximize-the-Minimum-Game-Score.cpp new file mode 100644 index 000000000..eeea34179 --- /dev/null +++ b/Binary_Search/3449.Maximize-the-Minimum-Game-Score/3449.Maximize-the-Minimum-Game-Score.cpp @@ -0,0 +1,54 @@ +using LL = long long; +class Solution { + int n; +public: + long long maxScore(vector& points, int m) + { + n = points.size(); + LL left = 0, right = 1e15; + while (left < right) + { + LL mid = right - (right-left)/2; + if (checkOK(points, m, mid)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool checkOK(vector& points, LL M, LL P) + { + LL count = 1; + LL cur = points[0]; + + for (int i=0; i=P) return true; + LL d = (P-cur-1) / points[i] + 1; + return count+d*2 <= M; + } + + if (cur>=P) + { + count++; + if (count > M) return false; + cur = points[i+1]; + } + else + { + LL d = (P-cur-1) / points[i] + 1; + if (i==n-2 && count+d*2<=M && points[i+1]*d>=P) + return true; + + count += 2*d+1; + if (count > M) return false; + cur = points[i+1] * (d+1); + } + } + + return true; + } +}; diff --git a/Binary_Search/3449.Maximize-the-Minimum-Game-Score/Readme.md b/Binary_Search/3449.Maximize-the-Minimum-Game-Score/Readme.md new file mode 100644 index 000000000..53d35b1f8 --- /dev/null +++ b/Binary_Search/3449.Maximize-the-Minimum-Game-Score/Readme.md @@ -0,0 +1,11 @@ +### 3449.Maximize-the-Minimum-Game-Score + +考虑到m的范围异常的大,本题极有可能是二分搜值。我们猜测一个值X,然后检验是否能在m次移动后,使得所有的元素都大于等于X。 + +移动的策略似乎很明显可以贪心。当我在0号位置的时候,如果还没有实现得分大于X,必然会通过先朝右再朝左的反复横跳d次,直至满足0号位置大于等于X。为什么只选择在0号和1号位置的反复横跳而不是更大的幅度?感觉没有必要。如果更大幅度的反复横跳,不仅在0号位置和1号位置上各自增加d次赋分,而且会在2号及之后的位置上也增加d次赋分,但这些赋分是否值得呢?不见得。因此,我们只需要老老实实每次做幅度为1的来回横跳即可。 + +综上,我们的算法是:当我们来到i时,查看在该位置是否已经超过了预期得分。如果没有,那就计算还需要几次赋分(假设记作d次)。然后就再做`=>(i+1)=>i`的d次反复移动。在i位置上满足之后,再移动依次到i+1的位置上,此时注意我们已经在i+1的位置上得到了`points[i+1]*(d+1)`的分数。然后重复上述的过程。 + +需要特别注意的边界逻辑有两个地方: +1. 如果走到了最后一个位置,仍没有超过预期得分,那么只能进行“往左再往右”的反复横跳。 +2. 
如果走到了倒数第二个位置,经过几次横跳之后,发现在此位置和下一个位置都已经满足了得分预期,那么最后一步可以不用再走了。 diff --git a/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square/3464.Maximize-the-Distance-Between-Points-on-a-Square.cpp b/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square/3464.Maximize-the-Distance-Between-Points-on-a-Square.cpp new file mode 100644 index 000000000..c8cebf22b --- /dev/null +++ b/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square/3464.Maximize-the-Distance-Between-Points-on-a-Square.cpp @@ -0,0 +1,73 @@ +using ll = long long; +class Solution { + vectorarr; + int next[15000]; + int n; + ll side; +public: + ll pos(int j) { + if (j= i+n) { + flag = false; + break; + } + } + if (pos(i)-pos(cur%n)>& points, int k) { + this->n = points.size(); + this->side = side; + for (auto& p: points) { + if (p[0]==0) + arr.push_back(p[1]); + else if (p[1]==side) + arr.push_back(side+p[0]); + else if (p[0]==side) + arr.push_back(2ll*side+side-p[1]); + else if (p[1]==0) + arr.push_back(3ll*side+side-p[0]); + } + + sort(arr.begin(), arr.end()); + + int low = 0, high = side; + while (low < high) { + int mid = high - (high-low)/2; + if (isOK(mid, k)) + low = mid; + else + high = mid-1; + } + return low; + } +}; diff --git a/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square/Readme.md b/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square/Readme.md new file mode 100644 index 000000000..8d1439d80 --- /dev/null +++ b/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square/Readme.md @@ -0,0 +1,22 @@ +### 3464.Maximize-the-Distance-Between-Points-on-a-Square + +我们沿着原点顺时针将所有的点放入一个数组,数组元素是每个点与原点的距离(沿着边行走)。注意题目中k大于等于4,说明点和点之间的最小曼哈顿距离不可能大于side。否则四个点绕一圈,总距离就大于四倍边长了。 + +我们可以尝试二分搜索答案。假设猜测最小间隔距离是d,那么我们可以将任意一点作为起点,以d为极限间隔找到下一个点,以此类推找到k个点。如果第k个点没有“套圈”起点,并且离起点的距离依然大于d,那么就是一个符合条件的方案。因为k很小,我们遍历所有点作为起点都尝试一遍,时间复杂度为o(nk),加上二分搜索的框架,总的时间复杂度是可以接受。 + +更具体的做法是,我们先用双指针,求得每个点i右边距离恰好不超过d的点next[i]。注意i的编号范围是0到n-1,如果i是接近套圈靠近原点的位置,next[i]的位置也可能会越过原点。故我们定义next[i]的范围是0到2n-1。 + +假设我们从p开始,做k次跨度为d跳转时,应该写成这样: +```cpp +for (int t=0; t=n),那么它的下一个超出了next的定义域,故我们需要用`next[p%n]`。同时因为p的下一个必然也是越过原点的,故我们需要再加n。 + +在任何时刻,如果p点套圈了当初的起点(即p>=start+n),这说明无法实现在四周分布k个点的要求(因为第k个位置与第一个位置重合)。此外,即使p没有套圈起点,但是距离与起点少于d,也是说明无法实现要求。其余的情况,都说明有解。 + +注意,本题是一定有解的,故二分搜索的收敛解就是最优解。 diff --git a/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II/3534.Path-Existence-Queries-in-a-Graph-II.cpp b/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II/3534.Path-Existence-Queries-in-a-Graph-II.cpp new file mode 100644 index 000000000..2b66e7612 --- /dev/null +++ b/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II/3534.Path-Existence-Queries-in-a-Graph-II.cpp @@ -0,0 +1,69 @@ +using ll = long long; +const int MAXN = 100000; +const int LOGN = 17; +class Solution { + int up[MAXN][LOGN+1]; +public: + ll stepUp(int u, int k) { + for (int i=LOGN; i>=0; i--) { + if ((k>>i)&1) { + u = up[u][i]; + } + } + return u; + } + + vector pathExistenceQueries(int n, vector& nums, int maxDiff, vector>& queries) { + vector>arr; + for (int i=0; iidx; + for (int i=0; irets; + for (auto& q: queries) { + if (q[0]==q[1]) { + rets.push_back(0); + continue; + } + int u = idx[q[0]], v = idx[q[1]]; + if (u>v) swap(u,v); + + int low = 1, high = 1e5; + while (low < high) { + int mid = low + (high-low)/2; + int k = stepUp(u, mid); + if (arr[k].first >= arr[v].first) + high = mid; + else + low = mid+1; + } + int k = stepUp(u, low); + if (arr[k].first >= arr[v].first) + rets.push_back(low); + else + rets.push_back(-1); + } + + return rets; + 
} +}; diff --git a/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II/Readme.md b/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II/Readme.md new file mode 100644 index 000000000..07f411383 --- /dev/null +++ b/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II/Readme.md @@ -0,0 +1,13 @@ +### 3534.Path-Existence-Queries-in-a-Graph-II + +我们将所有的数按照从小到大的顺序排列之后,本题的问题就是:给出任意两点ux, x-y, y->z, ... 如果最终的位置能够超越v,那么答案就是肯定。反之,如果问{u,v}之间最少用多少步可以实现跨越,那么显然我们可以用二分搜值再验证,从而逼近最优解。 + +对于验证的过程,我们如果线性地去模拟k次跳跃,时间是不够的。因此我们会用到binary lifting(倍增)算法。不仅计算x和它一步所能跳跃的最远点y之间的路径,而且还预处理`up[x][k]`,表示从x点往右跳跃`2^k`步所能到达的最远位置,其中k最大值是17即可。在准备好了up数组之后,二分搜值的验证过程只需要log(N)次的跳转即可。 + + + + diff --git a/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II.cpp b/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II.cpp new file mode 100644 index 000000000..440611fad --- /dev/null +++ b/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II.cpp @@ -0,0 +1,80 @@ +using ll = long long; +const int MAXN = 100000; +const int LOGN = 17; +class Solution { +public: + vector> adj[MAXN]; + int up[MAXN][LOGN+1]; + int depth[MAXN]; + ll distRoot[MAXN]; + + void dfs(int cur, int parent) + { + up[cur][0] = parent; + for(auto &[v,w]: adj[cur]) + { + if(v == parent) continue; + depth[v] = depth[cur] + 1; + distRoot[v] = distRoot[cur] + w; + dfs(v, cur); + } + } + + int lca(int a, int b) + { + if(depth[a] < depth[b]) swap(a,b); + int diff = depth[a] - depth[b]; + for(int k = 0; k <= LOGN; k++){ + if(diff & (1<= 0; k--){ + if(up[a][k] != up[b][k]){ + a = up[a][k]; + b = up[b][k]; + } + } + return up[a][0]; + } + + ll dist(int a, int b) + { + int c = lca(a,b); + return distRoot[a] + distRoot[b] - 2*distRoot[c]; + } + + vector minimumWeight(vector>& edges, vector>& queries) + { + int n = edges.size()+1; + + for (int i = 0; i < n-1; i++) + { + int u = edges[i][0], v = edges[i][1], w = edges[i][2]; + adj[u].push_back({v,w}); + adj[v].push_back({u,w}); + } + + depth[0] = 0; + distRoot[0] = 0; + dfs(0, 0); + + for(int k = 1; k <= LOGN; k++) { + for(int v = 0; v < n; v++) { + up[v][k] = up[up[v][k-1]][k-1]; + } + } + + vectorrets; + for (auto& q: queries) + { + int u = q[0], v = q[1], w = q[2]; + ll d_uv = dist(u,v); + ll d_vw = dist(v,w); + ll d_uw = dist(u,w); + ll ans = (d_uv + d_vw + d_uw) / 2; + rets.push_back(ans); + } + + return rets; + } +}; diff --git a/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II/Readme.md b/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II/Readme.md new file mode 100644 index 000000000..f210532f1 --- /dev/null +++ b/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II/Readme.md @@ -0,0 +1,9 @@ +### 3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II + +本题的第一个知识点是:在一棵树里,联通u,v,w三个节点的最小子树的权重和,就是`[dist(u,v)+dist(u,w)+dist(v,w)]/2`. + +本体的第二个知识点是:在一棵树里,联通x,y两点的路径长度,等于`dist(r,x)+dist(r,y)-2*dist(r,c)`,其中r是整棵树的根节点,c是x和y的LCA(lowest common ancester)。 + +任意一点到距离根节点的距离dist(r,x)可以通过DFS得到。于是本题的关键点就是求任意两点的LCA,于是就是一个binary list经典题。 + +我们需要处理得到一个数组up[v][k],表示节点v往上(朝根节点方向)走2^k步能够得到的位置。转移方程就是`up[v][k] = up[up[v][k-1]][k-1]`. 边界条件就是对于一对父子节点a->b,有`up[b][0]=a`. 
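+
+Putting the two observations together, each query reduces to three distance computations. A minimal sketch (assuming `distRoot[]`, the distance from the root filled by one DFS, and an `lca()` helper built from the `up[][]` table above; `answerQuery` is a hypothetical wrapper name):
+```cpp
+long long dist(int a, int b) {
+    int c = lca(a, b);                                      // lowest common ancestor of a and b
+    return distRoot[a] + distRoot[b] - 2LL * distRoot[c];   // second observation
+}
+long long answerQuery(int u, int v, int w) {
+    return (dist(u, v) + dist(v, w) + dist(u, w)) / 2;      // first observation: minimal subtree weight
+}
+```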
diff --git a/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II/3559.Number-of-Ways-to-Assign-Edge-Weights-II.cpp b/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II/3559.Number-of-Ways-to-Assign-Edge-Weights-II.cpp new file mode 100644 index 000000000..fd59af59b --- /dev/null +++ b/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II/3559.Number-of-Ways-to-Assign-Edge-Weights-II.cpp @@ -0,0 +1,93 @@ +using ll = long long; +const int MAXN = 100005; +const int LOGN = 17; +class Solution { +public: + vector> adj[MAXN]; + int up[MAXN][LOGN+1]; + int depth[MAXN]; + ll distRoot[MAXN]; + + void dfs(int cur, int parent) + { + up[cur][0] = parent; + for(auto &[v,w]: adj[cur]) + { + if(v == parent) continue; + depth[v] = depth[cur] + 1; + distRoot[v] = distRoot[cur] + w; + dfs(v, cur); + } + } + + int lca(int a, int b) + { + if(depth[a] < depth[b]) swap(a,b); + int diff = depth[a] - depth[b]; + for(int k = 0; k <= LOGN; k++){ + if(diff & (1<= 0; k--){ + if(up[a][k] != up[b][k]){ + a = up[a][k]; + b = up[b][k]; + } + } + return up[a][0]; + } + + ll dist(int a, int b) + { + int c = lca(a,b); + return distRoot[a] + distRoot[b] - 2*distRoot[c]; + } + + ll stepUp(int u, int k) { + for (int i=LOGN; i>=0; i--) { + if ((k>>i)&1) { + u = up[u][i]; + } + } + return u; + } + + vector assignEdgeWeights(vector>& edges, vector>& queries) { + int n = edges.size()+1; + + for (auto& edge: edges) + { + int u = edge[0], v = edge[1], w = 1; + adj[u].push_back({v,w}); + adj[v].push_back({u,w}); + } + + depth[1] = 0; + distRoot[1] = 0; + dfs(1, 1); + + for(int k = 1; k <= LOGN; k++) { + for(int v = 1; v <= n; v++) { + up[v][k] = up[up[v][k-1]][k-1]; + } + } + + vectorpower(n+1); + ll M = 1e9+7; + power[0] = 1; + for (int i=1; i<=n; i++) + power[i] = power[i-1]*2%M; + + vectorrets; + for (auto&q: queries) { + int u = q[0], v = q[1]; + int d = dist(u,v); + if (d==0) + rets.push_back(0); + else + rets.push_back(power[d-1]); + } + + return rets; + } +}; diff --git a/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II/Readme.md b/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II/Readme.md new file mode 100644 index 000000000..c24856884 --- /dev/null +++ b/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II/Readme.md @@ -0,0 +1,10 @@ +### 3559.Number-of-Ways-to-Assign-Edge-Weights-II + +我们很容易看出,可以用binary lifting高效地求出任意两点之间的edge的个数d。显然,每段edge可以赋值1或者2,因此总共会有2^d种组合。其中有多少种方法能使得总路径长度恰好是奇数呢?结论很简单,就是它们的一半,即2^(d-1)种。 + +我们可以用动态规划来推论一下。dp1[i]表示i条边组成的总长度为奇数的组合数,dp2[i]表示i条边组成的总长度为偶数的组合数。我们的转移方程是 +``` +dp1[i] = dp2[i-1]+dp1[i-1]; +dp2[i] = dp1[i-1]+dp2[i-1]; +``` +初始条件是`dp1[1]=dp2[1]=1`,显然会有对任意的i,都有`dp1[i]=dp2[i]`。故i条边组成的总长度为偶数和奇数的组合数一定相等。 diff --git a/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree/3585.Find-Weighted-Median-Node-in-Tree.cpp b/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree/3585.Find-Weighted-Median-Node-in-Tree.cpp new file mode 100644 index 000000000..15e4011a6 --- /dev/null +++ b/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree/3585.Find-Weighted-Median-Node-in-Tree.cpp @@ -0,0 +1,110 @@ +using ll = long long; +const int MAXN = 100000; +const int LOGN = 17; + +class Solution { +public: + vector> adj[MAXN]; + int up[MAXN][LOGN+1]; + int depth[MAXN]; + ll distRoot[MAXN]; + + void dfs(int cur, int parent) + { + up[cur][0] = parent; + for(auto &[v,w]: adj[cur]) + { + if(v == parent) continue; + depth[v] = depth[cur] + 1; + distRoot[v] = distRoot[cur] + w; + dfs(v, cur); + } + } + + int lca(int a, int b) + { + if(depth[a] < depth[b]) swap(a,b); + int diff = 
depth[a] - depth[b]; + for(int k = 0; k <= LOGN; k++){ + if(diff & (1<= 0; k--){ + if(up[a][k] != up[b][k]){ + a = up[a][k]; + b = up[b][k]; + } + } + return up[a][0]; + } + + int stepUp(int u, int k) { + for (int i=16; i>=0; i--) { + if ((k>>i)&1) { + u = up[u][i]; + } + } + return u; + } + + ll dist(int a, int b) + { + int c = lca(a,b); + return distRoot[a] + distRoot[b] - 2*distRoot[c]; + } + + vector findMedian(int n, vector>& edges, vector>& queries) { + for (int i = 0; i < n-1; i++) + { + int u = edges[i][0], v = edges[i][1], w = edges[i][2]; + adj[u].push_back({v,w}); + adj[v].push_back({u,w}); + } + + depth[0] = 0; + distRoot[0] = 0; + dfs(0, 0); + + for(int k = 1; k <= LOGN; k++) { + for(int v = 0; v < n; v++) { + up[v][k] = up[up[v][k-1]][k-1]; + } + } + + vectorrets; + for (auto& q: queries) + { + int u = q[0], v = q[1]; + int c = lca(u,v); + ll total = dist(u,c)+dist(c,v); + + int step1 = depth[u]-depth[c]; + int step2 = depth[v]-depth[c]; + + int low = 0, high = step1+step2; + int k; + while (low < high) { + int mid = low + (high-low)/2; + ll d; + if (mid <= step1) { + k = stepUp(u, mid); + d = distRoot[u] - distRoot[k]; + } else { + k = stepUp(v, step2 - (mid-step1)); + d = total - (distRoot[v] - distRoot[k]); + } + if (d >= total*0.5) + high = mid; + else + low = mid+1; + } + int step = low; + if (step<=step1) + rets.push_back(stepUp(u, step)); + else + rets.push_back(stepUp(v, step2-(step-step1))); + } + + return rets; + } +}; diff --git a/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree/Readme.md b/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree/Readme.md new file mode 100644 index 000000000..87b7b21b1 --- /dev/null +++ b/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree/Readme.md @@ -0,0 +1,7 @@ +### 3585.Find-Weighted-Median-Node-in-Tree + +对于任何一个query,我们只需要找到u到v路径(途中经过LCA的点记作c),假设路径的总步长是d,路径的总权重和是total。我们只需要在[0,d]之间进行二分搜索一个合适的步数k:即从u走k步,恰好走过的路径长度超过total的一半。 + +注意,我们在二分搜索对k进行判定的时候,需要分类讨论k是否在u到c的路径上,还是c到v的路径上。即看是否`dist(u,c) >= total * 0.5`. 
如果k是在u到c的路径上,那么经过的路径长度就是dist(u,k)。如果k是在c到v的路径上,那么经过的路径长度就是dist(u,c)+dist(c,k)。 + +根据binary lifting的算法,树里任意两个节点之间的距离都可以用log(n)的时间求解。 diff --git a/Binary_Search/3639.Minimum-Time-to-Activate-String/3639.Minimum-Time-to-Activate-String.cpp b/Binary_Search/3639.Minimum-Time-to-Activate-String/3639.Minimum-Time-to-Activate-String.cpp new file mode 100644 index 000000000..cf17556d5 --- /dev/null +++ b/Binary_Search/3639.Minimum-Time-to-Activate-String/3639.Minimum-Time-to-Activate-String.cpp @@ -0,0 +1,43 @@ +using ll = long long; +class Solution { +public: + int minTime(string s, vector& order, int k) { + int n = s.size(); + vectorisStar(n, false); + ll T = ll(n)*(n+1)/2; + if (k>T) return -1; + + auto check = [&](int mid) { + fill(isStar.begin(), isStar.end(), false); + for (int i=0; i<=mid; i++) + isStar[order[i]] = true; + + ll sumNon = 0; + for (int i=0; i= k; + }; + + int lo = 0, hi = n-1, ans = -1; + while (lo < hi) { + int mid = lo + (hi-lo)/2; + if (check(mid)) { + hi = mid; + } else { + lo = mid+1; + } + } + if (check(hi)) return hi; + else return -1; + } +}; diff --git a/Binary_Search/3639.Minimum-Time-to-Activate-String/Readme.md b/Binary_Search/3639.Minimum-Time-to-Activate-String/Readme.md new file mode 100644 index 000000000..7f198d6f2 --- /dev/null +++ b/Binary_Search/3639.Minimum-Time-to-Activate-String/Readme.md @@ -0,0 +1,5 @@ +### 3639.Minimum-Time-to-Activate-String + +非常明显的二分搜值。假设运行到某个时刻t,那么我们就得到一个包含若干星号的字符串。我们需要考察该字符串里至少包含一个星号的substring的个数是否超过k。超过的话,就可以尝试减少k,否则需要增加k。 + +计算“至少包含一个星号的substring的个数”,等效于反向计算“没有任何星号的substring的个数”,并且后者更容易计算。对于任何一段连续的、不包含任何星号的子串长度p,那么就有p*(p+1)/2个子串符合条件。我们分割原始字符串为若干段“没有任何星号的区间”,分别计算再相加即可。 diff --git a/Binary_Search/3677.Count-Binary-Palindromic-Numbers/3677.Count-Binary-Palindromic-Numbers.cpp b/Binary_Search/3677.Count-Binary-Palindromic-Numbers/3677.Count-Binary-Palindromic-Numbers.cpp new file mode 100644 index 000000000..f4c66322c --- /dev/null +++ b/Binary_Search/3677.Count-Binary-Palindromic-Numbers/3677.Count-Binary-Palindromic-Numbers.cpp @@ -0,0 +1,57 @@ +using LL = long long; +class Solution { +public: + LL reverseBits(LL x) { + LL r = 0; + while (x>0) { + r = r*2+(x&1); + x>>=1; + } + return r; + } + + LL build(LL half, int L) { + int h = (L+1)/2; + int k = L-h; + if (L%2==0) { + return (half<>1); + } + } + + int countBinaryPalindromes(long long n) { + if (n==0) return 1; + int maxLen = floor(log2(n)) + 1; + + LL ret = 1; + for (int L=1; Lmx`的情况,所以需要对收敛的解做二次验证。 + diff --git a/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums/373.Find-K-Pairs-with-Smallest-Sums.cpp b/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums/373.Find-K-Pairs-with-Smallest-Sums.cpp index 9cc7fbfe6..4d4237982 100644 --- a/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums/373.Find-K-Pairs-with-Smallest-Sums.cpp +++ b/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums/373.Find-K-Pairs-with-Smallest-Sums.cpp @@ -1,45 +1,36 @@ -class Solution { - struct cmp - { - bool operator()(paira,pairb) - { - return a.first>b.first; - } - }; +using AI3 = array; +class Solution { public: - vector> kSmallestPairs(vector& nums1, vector& nums2, int k) + vector> kSmallestPairs(vector& nums1, vector& nums2, int k) { - priority_queue,vector>,cmp>q; - int M=nums1.size(); - int N=nums2.size(); - vector>results; - if (M==0 || N==0) return results; + priority_queue, greater<>>pq; + int m=nums1.size(); + int n=nums2.size(); + vector>rets; - auto used=vector>(M,vector(N,0)); - q.push({nums1[0]+nums2[0],0}); - used[0][0]=1; + set>Set; + pq.push({nums1[0]+nums2[0], 0, 0}); + Set.insert({0,0}); - 
int count=0; - while (count0) + + while (rets.size() < k && pq.size()>0) { - int m=q.top().second/N; - int n=q.top().second%N; - results.push_back({nums1[m],nums2[n]}); - count++; - q.pop(); - - if (m+1& arr, int target) { - unordered_sets; + int closestToTarget(vector& arr, int target) + { + setSet, temp; int ret = INT_MAX; - for (int i=0; is2; - for (auto x: s) - s2.insert(x&arr[i]); - s2.insert(arr[i]); - for (auto x: s2) - ret = min(ret, abs(x-target)); - s = s2; + for (auto y: Set) + temp.insert(y&x); + temp.insert(x); + + for (int y: temp) + ret = min(ret, abs(y-target)); + + Set = temp; + temp.clear(); } + return ret; } }; diff --git a/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns/2397.Maximum-Rows-Covered-by-Columns.cpp b/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns/2397.Maximum-Rows-Covered-by-Columns.cpp new file mode 100644 index 000000000..5c3c25cea --- /dev/null +++ b/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns/2397.Maximum-Rows-Covered-by-Columns.cpp @@ -0,0 +1,36 @@ +class Solution { +public: + int maximumRows(vector>& mat, int cols) + { + int m = mat.size(); + int n = mat[0].size(); + + vectornums; + for (int i=0; i> 2) / c) | r; + } + + return ret; + + } +}; diff --git a/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns/Readme.md b/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns/Readme.md new file mode 100644 index 000000000..f6696b6d6 --- /dev/null +++ b/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns/Readme.md @@ -0,0 +1,5 @@ +### 2397.Maximum-Rows-Covered-by-Columns + +考虑到总的列数不超过12,枚举所有的列的选择都是可行的。对于一种固定的列的组合,我们记成二进制数state,先排除掉那些bit 1的个数不等于cols的。然后我们只需要查看每一行对应的二进制数row是否是state的子集即可,即`(state&row) == row`. 我们最后选择一个能cover最多row的state。 + +此外,我们可以用gosper's hack来提高效率,只枚举那些bit 1的个数等于cols的state。 diff --git a/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums/2505.Bitwise-OR-of-All-Subsequence-Sums.cpp b/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums/2505.Bitwise-OR-of-All-Subsequence-Sums.cpp new file mode 100644 index 000000000..cdbbeed7c --- /dev/null +++ b/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums/2505.Bitwise-OR-of-All-Subsequence-Sums.cpp @@ -0,0 +1,16 @@ +using LL = long long; +class Solution { +public: + long long subsequenceSumOr(vector& nums) + { + LL ret = 0; + LL sum = 0; + for (int x: nums) + { + ret = ret | x; + sum += x; + ret = ret | sum; + } + return ret; + } +}; diff --git a/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums/Readme.md b/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums/Readme.md new file mode 100644 index 000000000..5108f5102 --- /dev/null +++ b/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums/Readme.md @@ -0,0 +1,9 @@ +### 2505.Bitwise-OR-of-All-Subsequence-Sums + +首先我们知道一个大方向:OR的操作越多,就有越多的bit能被置1. + +因为单个元素也属于一个subsequence,所以可以将所有的nums[i]都OR起来。即如果任何一个元素的任何一个bit位上是1,那么最终答案里该bit上也一定是1. + +那么如果某个bit位上,没有任何一个现成的nums[i]是1,那么怎么判定呢?因为还存在一种可能,就是某些元素(即subsequence)的加和,恰好使得在该bit位上因为进位从而置1. 我们如何知道是否存在这样的进位呢?为了判定这样的“进位”是否会发生,我们就尽可能多地进行加法操作即可。极端一点,就是将所有元素都加起来,查看这个这个过程中,该bit位上是否曾经出现过1. 如果始终都没有出现过1,那么意味着任何一个subsequence的加和也不会在该bit位产生进位的1. 
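+
+A quick sanity check (a worked example added here, not from the original): for nums = [2,1,0,3], no single element has bit 2 set, yet the running sums 2, 3, 3, 6 light it up at the last step, so OR-ing all elements (= 3) with all prefix sums gives 7, which matches a brute force over every non-empty subsequence. A hypothetical exponential verifier for tiny inputs:
+```cpp
+long long bruteForceOr(vector<int>& nums) {
+    int n = nums.size();
+    long long ret = 0;
+    for (int mask = 1; mask < (1 << n); mask++) {    // enumerate every non-empty subsequence
+        long long sum = 0;
+        for (int i = 0; i < n; i++)
+            if ((mask >> i) & 1) sum += nums[i];
+        ret |= sum;                                  // OR the subsequence sum into the answer
+    }
+    return ret;
+}
+```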
+ +所以最终的答案,就是将nums[i]和所有presum[i]进行OR操作即可。 diff --git a/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array/2527.Find-Xor-Beauty-of-Array.cpp b/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array/2527.Find-Xor-Beauty-of-Array.cpp new file mode 100644 index 000000000..d9ba6f743 --- /dev/null +++ b/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array/2527.Find-Xor-Beauty-of-Array.cpp @@ -0,0 +1,11 @@ +class Solution { +public: + int xorBeauty(vector& nums) + { + int sum = 0; + for (int x: nums) + sum ^= x; + return sum; + + } +}; diff --git a/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array/Readme.md b/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array/Readme.md new file mode 100644 index 000000000..0a7137376 --- /dev/null +++ b/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array/Readme.md @@ -0,0 +1,11 @@ +### 2527.Find-Xor-Beauty-of-Array + +因为bit之间彼此互不影响,所以我们这里仅考虑一个bit位的情况。 + +我们要把所有可能的`(a|b)&c`进行异或。这里要注意,异或的本质是就是bit 1的个数。如果有奇数个bit 1进行异或,答案就是1;否则答案就是0. 所以在这里,我们只需要考虑数值为1的c。数值为0的c,只会贡献`(a|b)&c=0`,不影响结果。 + +当我们固定了c,接下来考虑所有`a|b`的配对,我们需要将这些0或1都异或起来。假设有总共有n个元素,其中有x个元素在这个bit上是1,剩下y个元素在这个bit上是0,那么我们选取到`a|b=1`的个数就是`(n^2-y^2)`个(即排除掉选到两个0的可能性)。我们发现`(n^2-y^2)=(n+y)(n-y)=(n+y)x=(2n-x)x`,说明`(n^2-y^2)`的奇偶性取决于x的奇偶性。也就是说,如果在这个bit位上有奇数个1,那么就有奇数个`a|b`的值是1,它们异或的结果就是1. 反之,它们异或的结果就是0. 如果用一个公示表达,那就是等于将所有元素的该bit都异或起来。 + +以上分析是针对固定的c。那么遍历所有的c,会遇到类似的情况。如果固定c对应的`sum(a|b)`是1,那么说明有奇数个c是1,再将奇数个答案异或起来,那还是1. 如果固定c对应的答案是0,那么说明所有的`sum(a|b)`都是0,最终答案还是0. + +所以最终答案只需要将所有元素异或起来即可。 diff --git a/Bit_Manipulation/260.Single-Number-III/260.Single-Number-III.cpp b/Bit_Manipulation/260.Single-Number-III/260.Single-Number-III.cpp index d5a308b60..bbfea14a2 100644 --- a/Bit_Manipulation/260.Single-Number-III/260.Single-Number-III.cpp +++ b/Bit_Manipulation/260.Single-Number-III/260.Single-Number-III.cpp @@ -2,9 +2,9 @@ class Solution { public: vector singleNumber(vector& nums) { - int s = 0; + long long s = 0; for (auto n:nums) s = s^n; // i.e. 
a^b - int t = s^(s&(s-1)); // only keep the rightmost set bit + long long t = s^(s&(s-1)); // only keep the rightmost set bit int a = 0, b = 0; for (auto n:nums) { diff --git a/Bit_Manipulation/2680.Maximum-OR/2680.Maximum-OR.cpp b/Bit_Manipulation/2680.Maximum-OR/2680.Maximum-OR.cpp new file mode 100644 index 000000000..60d85e143 --- /dev/null +++ b/Bit_Manipulation/2680.Maximum-OR/2680.Maximum-OR.cpp @@ -0,0 +1,38 @@ +using LL = long long; +class Solution { +public: + long long maximumOr(vector& nums, int k) + { + vectorcount(32); + + for (int i = 0; i< nums.size(); i++) + { + for (int j=0; j<=31; j++) + { + if ((nums[i]>>j)&1) + count[j]++; + } + } + + LL ret = 0; + for (int i = 0; i< nums.size(); i++) + { + auto temp = count; + for (int j=0; j<=31; j++) + { + if ((nums[i]>>j)&1) + temp[j]--; + } + LL ans = 0; + for (int j=0; j<=31; j++) + { + if (temp[j]>0) + ans += (1<0) + { + if (x%2==0) + ret.push_back('4'); + else + ret.push_back('7'); + x/=2; + } + ret.pop_back(); + reverse(ret.begin(), ret.end()); + return ret; + } +}; diff --git a/Bit_Manipulation/2802.Find-The-K-th-Lucky-Number/Readme.md b/Bit_Manipulation/2802.Find-The-K-th-Lucky-Number/Readme.md new file mode 100644 index 000000000..93eb946a7 --- /dev/null +++ b/Bit_Manipulation/2802.Find-The-K-th-Lucky-Number/Readme.md @@ -0,0 +1,13 @@ +### 2802.Find-The-K-th-Lucky-Number + +这是一个常见的技巧。Lucky Number仅由两个digit组成,所以它是"4"与"7",还是“0”与“1”,没有本质区别。我们索性就利用二进制数来构造第k大的01序列。 + +因为任何二进制数都没有先导零,第一位总是1。所以我们排除所有二进制数的第一个bit 1,剩余的bit位恰好就构成了递增的01序列。举例如下: +``` +2: 10 -> 0 +3: 11 -> 1 +4: 100 -> 00 +5: 101 -> 01 +6: 110 -> 10 +``` +我们从2开始枚举自然数,得到其二进制表达式,去掉先导1,剩余的部分就是递增的01序列。我们将其替换为“4”“7”序列即可。 diff --git a/Bit_Manipulation/2992.Number-of-Self-Divisible-Permutations/2992.Number-of-Self-Divisible-Permutations.cpp b/Bit_Manipulation/2992.Number-of-Self-Divisible-Permutations/2992.Number-of-Self-Divisible-Permutations.cpp new file mode 100644 index 000000000..a281fd721 --- /dev/null +++ b/Bit_Manipulation/2992.Number-of-Self-Divisible-Permutations/2992.Number-of-Self-Divisible-Permutations.cpp @@ -0,0 +1,20 @@ +class Solution { + int dp[13][4096]; +public: + int selfDivisiblePermutationCount(int n) + { + int state = 0; + dp[0][0] = 1; + for (int i=1; i<=n; i++) + for (int state = 0; state<(1<>(d-1))&1)==0) continue; + dp[i][state] += dp[i-1][state-(1<<(d-1))]; + } + } + return dp[n][(1<>(d-1))&1)==0) continue; + dp[i][state] += dp[i-1][state-(1<<(d-1))]; +} +``` +最终返回dp[n][(1<& coins, int k) + { + LL left = 1, right = 51e9; + while (left < right) + { + LL mid = left+(right-left)/2; + if (countNumber(mid, coins) >= k) + right = mid; + else + left = mid+1; + } + return left; + } + + LL countNumber(LL M, vector& coins) + { + int m = coins.size(); + + LL ret = 0; + int sign = 1; + + for (int k=1; k<=m; k++) + { + LL sum = 0; + int state = (1 << k) - 1; + while (state < (1 << m)) + { + LL LCM = 1; + for (int i=0; i>i)&1) + LCM = lcm(LCM, coins[i]); + } + sum += M / LCM; + + int c = state & - state; + int r = state + c; + state = (((r ^ state) >> 2) / c) | r; + } + + ret += sum * sign; + sign *= -1; + } + + return ret; + } +}; diff --git a/Bit_Manipulation/3116.Kth-Smallest-Amount-With-Single-Denomination-Combination/Readme.md b/Bit_Manipulation/3116.Kth-Smallest-Amount-With-Single-Denomination-Combination/Readme.md new file mode 100644 index 000000000..78f967265 --- /dev/null +++ b/Bit_Manipulation/3116.Kth-Smallest-Amount-With-Single-Denomination-Combination/Readme.md @@ -0,0 +1,9 @@ +### 3116.Kth-Smallest-Amount-With-Single-Denomination-Combination + 
+本题就是求第k个能被coins里任意一个元素整除的自然数。 + +因为我们无法枚举每个符合条件的自然数直至第k个,我们只能用二分搜值的框架。猜测一个M,计算M以内符合条件的自然数有多少,多了就降低M,少了就抬升M,直至找到恰好的M。 + +那么如何计算M以内、能被coins里任意一个元素整除的自然数的个数呢?显然我们会用容斥原理。`能被任意一个元素整除的个数 = sum(能被每一个元素整除的个数) - sum(能被每两个元素同时整除的个数) + sum(能被每三个元素同时整除的个数) - sum(能被每四个元素同时整除的个数) + ...` + +举个例子,能被a,b,c三个元素同时整除的个数,是 `M / lcm(a,b,c)`,其中lcm是三者的最小公倍数。如果想在m个元素里,枚举任意三个元素的组合,那么我们会用gospher's hack,高效枚举中一个长度为m的二进制数里含有三个bit 1的mask。 diff --git a/Bit_Manipulation/3133.Minimum-Array-End/3133.Minimum-Array-End.cpp b/Bit_Manipulation/3133.Minimum-Array-End/3133.Minimum-Array-End.cpp new file mode 100644 index 000000000..c9f7746bb --- /dev/null +++ b/Bit_Manipulation/3133.Minimum-Array-End/3133.Minimum-Array-End.cpp @@ -0,0 +1,44 @@ +using LL = long long; +class Solution { +public: + long long minEnd(int n, int x) + { + LL m = n-1; + vectornum; + while (m>0) + { + num.push_back(m%2); + m/=2; + } + + vectorbits; + while (x>0) + { + bits.push_back(x%2); + x/=2; + } + + int j = 0; + for (int i=0; i=0; j--) + ret = ret*2+bits[j]; + + return ret; + + } +}; diff --git a/Bit_Manipulation/3133.Minimum-Array-End/Readme.md b/Bit_Manipulation/3133.Minimum-Array-End/Readme.md new file mode 100644 index 000000000..e5a924d50 --- /dev/null +++ b/Bit_Manipulation/3133.Minimum-Array-End/Readme.md @@ -0,0 +1,9 @@ +### 3133.Minimum-Array-End + +本题要求构造一个严格递增的、长度为n的序列,使得其bitwise AND的结果是x。问序列的最大元素可以是多少。 + +因为对一个序列做bitwise AND操作,最终结果不可能大于其中最小的元素。所以首元素不可能比x更小,否则总的bitwise AND不可能是x。最理想的情况就是将x作为序列的首元素,序列的其余元素都比x大。 + +为了保证bitwise AND最终结果是x,对于x里属于1的那些bit位,序列里的任何元素在这些二进制位置都不能是0。否则最终答案在该位置上成为了0,必然比x小。我们唯一能自由操作的就是那些属于0的bit位。 + +总结算法:我们将x进行二进制分解。保持那些属于1的bit位不变。这样我们可以构造长度为n的最小序列:0,1,2,...,n-1。其中将最大元素n-1进行二进制拆解,但是填充在x的那些属于0的bit位上。这样就得到了答案。 diff --git a/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K_v1.cpp b/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K_v1.cpp new file mode 100644 index 000000000..2793c6d98 --- /dev/null +++ b/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K_v1.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int minimumDifference(vector& nums, int k) + { + unordered_setSet, temp; + int ret = INT_MAX; + for (int x: nums) + { + for (int y: Set) + temp.insert(y | x); + temp.insert(x); + + for (int y: temp) + ret = min(ret, abs(y-k)); + + Set = temp; + temp.clear(); + } + return ret; + } +}; diff --git a/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K_v2.cpp b/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K_v2.cpp new file mode 100644 index 000000000..db5e8b020 --- /dev/null +++ b/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K_v2.cpp @@ -0,0 +1,94 @@ +class SegmentTree { +private: + vector tree; + int n; + + void build(vector& nums, int node, int start, int end) + { + if (start == end) { + tree[node] = nums[start]; + } else { + int mid = (start + end) / 2; + build(nums, 2 * node, start, mid); + build(nums, 2 * node + 1, mid + 1, end); + tree[node] = tree[2 * node] & tree[2 * node + 1]; + } + } + + void update(int node, int start, int end, int L, int R, int val) + { + if (R < start || end < L) { + return; + } + if (L <= start && end <= R) { + tree[node] = val; + return; + } + int mid = (start + end) / 2; + update(2 * node, start, mid, L, R, 
val); + update(2 * node + 1, mid + 1, end, L, R, val); + tree[node] = tree[2 * node] & tree[2 * node + 1]; + } + + int query(int node, int start, int end, int L, int R) + { + if (R < start || end < L) { + return INT_MAX; // Identity for AND operation (all bits set) + } + if (L <= start && end <= R) { + return tree[node]; + } + int mid = (start + end) / 2; + int leftAnd = query(2 * node, start, mid, L, R); + int rightAnd = query(2 * node + 1, mid + 1, end, L, R); + return leftAnd & rightAnd; + } + +public: + SegmentTree(vector& nums) { + n = nums.size(); + tree.resize(4 * n, 0); + build(nums, 1, 0, n - 1); + } + + void rangeUpdate(int L, int R, int val) { + update(1, 0, n - 1, L, R, val); + } + + int rangeAnd(int L, int R) { + return query(1, 0, n - 1, L, R); + } +}; + +class Solution { +public: + int minimumDifference(vector& nums, int k) + { + int n = nums.size(); + SegmentTree segTree(nums); + int ret = INT_MAX; + + for (int i = 0; i < n; ++i) + { + int low = i, high = n - 1; + while (low < high) + { + int mid = high - (high - low)/2; + if (segTree.rangeAnd(i, mid) >= k) + low = mid; + else + high = mid-1; + } + + int ret1 = abs(segTree.rangeAnd(i, low) - k); + int ret2 = INT_MAX; + if (low+1& nums, int k) + { + map mp, temp; + long long ans = 0; + for(int x: nums) + { + for(auto& [k,v]: mp) + temp[k & x] += v; + temp[x]++; + + if(temp.find(k) != temp.end()) + ans += temp[k]; + + mp = temp; + temp.clear(); + } + return ans; + } +}; diff --git a/Bit_Manipulation/3209.Number-of-Subarrays-With-AND-Value-of-K/3209.Number-of-Subarrays-With-AND-Value-of-K_v2.cpp b/Bit_Manipulation/3209.Number-of-Subarrays-With-AND-Value-of-K/3209.Number-of-Subarrays-With-AND-Value-of-K_v2.cpp new file mode 100644 index 000000000..ba13f016e --- /dev/null +++ b/Bit_Manipulation/3209.Number-of-Subarrays-With-AND-Value-of-K/3209.Number-of-Subarrays-With-AND-Value-of-K_v2.cpp @@ -0,0 +1,108 @@ +using LL = long long; +class SegmentTree { +private: + vector tree; + int n; + + void build(vector& nums, int node, int start, int end) + { + if (start == end) { + tree[node] = nums[start]; + } else { + int mid = (start + end) / 2; + build(nums, 2 * node, start, mid); + build(nums, 2 * node + 1, mid + 1, end); + tree[node] = tree[2 * node] & tree[2 * node + 1]; + } + } + + void update(int node, int start, int end, int L, int R, int val) + { + if (R < start || end < L) { + return; + } + if (L <= start && end <= R) { + tree[node] = val; + return; + } + int mid = (start + end) / 2; + update(2 * node, start, mid, L, R, val); + update(2 * node + 1, mid + 1, end, L, R, val); + tree[node] = tree[2 * node] & tree[2 * node + 1]; + } + + int query(int node, int start, int end, int L, int R) + { + if (R < start || end < L) { + return INT_MAX; // Identity for AND operation (all bits set) + } + if (L <= start && end <= R) { + return tree[node]; + } + int mid = (start + end) / 2; + int leftAnd = query(2 * node, start, mid, L, R); + int rightAnd = query(2 * node + 1, mid + 1, end, L, R); + return leftAnd & rightAnd; + } + +public: + SegmentTree(vector& nums) { + n = nums.size(); + tree.resize(4 * n, 0); + build(nums, 1, 0, n - 1); + } + + void rangeUpdate(int L, int R, int val) { + update(1, 0, n - 1, L, R, val); + } + + int rangeAnd(int L, int R) { + return query(1, 0, n - 1, L, R); + } +}; + +class Solution { +public: + long long countSubarrays(vector& nums, int k) + { + int n = nums.size(); + SegmentTree segTree(nums); + LL ret = 0; + + for (int i=0; ik) + left = mid+1; + else + right = mid; + } + if (segTree.rangeAnd(i,left)==k) + a = 
left; + + left = i, right = n-1; + while (left < right) + { + int mid = right-(right-left)/2; + if (segTree.rangeAnd(i,mid)& A) + { + unordered_setSet; + unordered_setAll; + for (int x: A) + { + unordered_settemp; + for (int y: Set) + { + temp.insert(y | x); + All.insert(y | x); + } + temp.insert(x); + All.insert(x); + Set = temp; + + } + return All.size(); + } +}; diff --git a/Bit_Manipulation/898.Bitwise-ORs-of-Subarrays/Readme.md b/Bit_Manipulation/898.Bitwise-ORs-of-Subarrays/Readme.md new file mode 100644 index 000000000..987e3a92b --- /dev/null +++ b/Bit_Manipulation/898.Bitwise-ORs-of-Subarrays/Readme.md @@ -0,0 +1,7 @@ +### 898.Bitwise-ORs-of-Subarrays + +本题的突破口在于,OR的结果不会特别多。任何一个元素作为左端点的时候,随着右端点的移动,OR的结果只会是单调地变大(某个bit的0变成1),因此最多只有32种可能。同理,以任何一个元素可以作为右端点,那么所有subarray的OR结果做多也是32种。 + +因此我们可以用类似背包问题的算法,穷举上一个元素为右端点时所有subarray的OR值,来更新该元素为右端点时所有subarray的OR值。 + +最终的答案是o(32N)数量级,因为可以全部存下来。 diff --git a/DFS/037.Sudoku-Solver/037.Sudoku-Solver.cpp b/DFS/037.Sudoku-Solver/037.Sudoku-Solver.cpp index 2c831d805..24c97459d 100644 --- a/DFS/037.Sudoku-Solver/037.Sudoku-Solver.cpp +++ b/DFS/037.Sudoku-Solver/037.Sudoku-Solver.cpp @@ -11,7 +11,7 @@ class Solution { if (j==9) return DFS(board, i+1, 0); if (board[i][j]!='.') return DFS(board, i, j+1); - for (int k='1'; k<='9'; k++) + for (char k='1'; k<='9'; k++) { if (!isValid(board, i, j, k)) continue; board[i][j]=k; diff --git a/DFS/1192.Critical-Connections-in-a-Network/Readme.md b/DFS/1192.Critical-Connections-in-a-Network/Readme.md index 64537cb6b..94a2c7e37 100644 --- a/DFS/1192.Critical-Connections-in-a-Network/Readme.md +++ b/DFS/1192.Critical-Connections-in-a-Network/Readme.md @@ -6,11 +6,11 @@ 简单地说,我们可以以任意一个未访问过的节点作为根节点,用DFS的顺序来进行搜索,即永远深度优先,然后回溯再搜索其他分支。如果碰到访问过的节点,就停止,保证不行成环。 -我们在dfs的过程中维护两个数组,一个是dfs[u],表示节点u被第一次访问时的顺序(可以理解为时间戳),这个是唯一且不变的量。另一个数组low[u]比较关键,初始的时候```low[u]=dfn[u]```。我们以u为节点的开始dfs(注意抵达u之前可能还有u的父节点,但我们dfs的时候不走回头路),想象它最终形成一棵搜索树,那么u的所有子节点中止的条件不外乎有两个:一个是走进了死胡同;另一个就是遇到了已经访问过的节点,特别的,这个已经访问过的节点有可能是u的祖先节点!所以,有了这样的搜索树之后,low[u]可以有机会更新为它所有的子节点v可以接触到的最小时间戳low[v]。 +我们在dfs的过程中维护两个数组,一个是dfn[u],表示节点u被第一次访问时的顺序(可以理解为时间戳),这个是唯一且不变的量。另一个数组low[u]比较关键,初始的时候```low[u]=dfn[u]```。我们以u为节点的开始dfs(注意抵达u之前可能还有u的父节点,但我们dfs的时候不走回头路),想象它最终形成一棵搜索树,那么u的所有子节点中止的条件不外乎有两个:一个是走进了死胡同;另一个就是遇到了已经访问过的节点,特别的,这个已经访问过的节点有可能是u的祖先节点!所以,有了这样的搜索树之后,low[u]可以有机会更新为它所有的子节点v可以接触到的最小时间戳low[v]。 令v是u的一个子节点,且有```low[v]>dfn[u]```,这说明什么呢?说明从v出发最终无法绕道u的前面去。因此(v,u)就是割边。如果消除了这条边,v及其子树就是一个孤岛,无法与u或u的祖先相通。同理,如果```low[v]>=dfn[u]```,说明u是一个割点,如果消除了这个点,那么v及其子树也是一个孤岛。 本题中我们还设置了一个parent,其实是为了标记dfs过程中的搜索顺序。因为无向图```for auto v: next[u]```的遍历过程中,v可能是u的父节点,这种情况下v其实不能作为从u开始dfs的下一个目的地(否则就是走回头路了),所以得排除。 -[Leetcode Link](https://leetcode.com/problems/critical-connections-in-a-network) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/critical-connections-in-a-network) diff --git a/DFS/1778.Shortest-Path-in-a-Hidden-Grid/1778.Shortest-Path-in-a-Hidden-Grid.cpp b/DFS/1778.Shortest-Path-in-a-Hidden-Grid/1778.Shortest-Path-in-a-Hidden-Grid.cpp index 10cf6e11f..607d96ac8 100644 --- a/DFS/1778.Shortest-Path-in-a-Hidden-Grid/1778.Shortest-Path-in-a-Hidden-Grid.cpp +++ b/DFS/1778.Shortest-Path-in-a-Hidden-Grid/1778.Shortest-Path-in-a-Hidden-Grid.cpp @@ -12,17 +12,17 @@ typedef pair PII; class Solution { - int grid[1000][1000]; - int visited[1000][1000]; - int visited2[1000][1000]; + int grid[1001][1001]; + int visited[1001][1001]; + int visited2[1001][1001]; + vector dir{{-1,0},{1,0},{0,-1},{0,1}}; + vector move{'U','D','L','R'}; public: int findShortestPath(GridMaster &master) { 
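+        // anchor the unknown start at (500,500) inside the oversized 1001x1001 board so relative coordinates stay in bounds;
+        // first DFS through the GridMaster API to record every reachable cell, then BFS over the recorded grid for the shortest path to the target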
visited[500][500] = 1; dfs(500, 500, master); - - auto dir = vector({{-1,0},{1,0},{0,-1},{0,1}}); - + queueq; q.push({500,500}); visited2[500][500]=1; @@ -56,8 +56,6 @@ class Solution { void dfs(int i, int j, GridMaster &master) { - auto dir = vector({{-1,0},{1,0},{0,-1},{0,1}}); - vector move({'U','D','L','R'}); grid[i][j] = 1; diff --git a/DFS/1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts/Readme.md b/DFS/1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts/Readme.md index ba980904b..47e84d952 100644 --- a/DFS/1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts/Readme.md +++ b/DFS/1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts/Readme.md @@ -20,7 +20,7 @@ return ret + bonus; } ``` -上面的```dfs(count, presum, i)```表示我们已经选择了i-1个groups(它们的前缀和是presum、已有的得分是prescore),我们从剩下的groups挑选一个安排在第i个。选哪个好呢?我们不知道,必须每种可能都尝试一次,结合相应的```dfs(..., i+1)```来判断。这里需要注意的是,如果此时的presum恰好被batch整除,那么说明无论第i个元素取谁,我们都可以得到1分,所以下次递归的时候perscore可以增加1。 +上面的```dfs(count, presum, i)```表示我们已经选择了i-1个groups(它们的前缀和是presum,哪些被选择了记录在count里),我们从剩下的groups挑选一个安排在第i个。选哪个好呢?我们不知道,必须每种可能都尝试一次,结合相应的```dfs(..., i+1)```来判断。这里需要注意的是,如果此时的presum恰好被batch整除,那么说明无论第i个元素取谁,我们都可以得到1分。所以返回的答案就是下轮递归 里的最大值,再加本轮的1。 以上的解法自然会TLE,原因是什么呢?显然是没有记忆化。我们可以发现,dfs函数中,其实只要确定了当前的count(即未被安排的groups),其他的参数presum本质上就是确定了的。所以记忆化的key其实就是count。但是count是一个数组,如何将转化为一个方便的key呢?和状态压缩相同的原因。因为count[i]最多30个,用五个bit就能表示(0~32)。batch最多是9,所以总共45位的二进制数就可以表述count数组。这就要求这个key是long long类型。 diff --git a/DFS/2305.Fair-Distribution-of-Cookies/2305.Fair-Distribution-of-Cookies_v1.cpp b/DFS/2305.Fair-Distribution-of-Cookies/2305.Fair-Distribution-of-Cookies_v1.cpp new file mode 100644 index 000000000..10cb2bbc1 --- /dev/null +++ b/DFS/2305.Fair-Distribution-of-Cookies/2305.Fair-Distribution-of-Cookies_v1.cpp @@ -0,0 +1,29 @@ +class Solution { + int ret = INT_MAX; + int plan[8]; +public: + int distributeCookies(vector& cookies, int k) + { + dfs(cookies, k, 0); + return ret; + } + + void dfs(vector& cookies, int k, int curCookie) + { + if (curCookie == cookies.size()) + { + int mx = 0; + for (int i=0; i& cookies, int k) + { + sort(cookies.rbegin(), cookies.rend()); + + int left = 1, right = INT_MAX; + while (left < right) + { + for (int i=0; i& cookies, int limit, int k, int curCookie) + { + if (curCookie == cookies.size()) return true; + + int flag = 0; + for (int i=0; i limit) continue; + if (plan[i]==0) + { + if (flag==1) continue; + flag = 1; + } + + plan[i] += cookies[curCookie]; + if (dfs(cookies, limit, k, curCookie+1)) + return true; + plan[i] -= cookies[curCookie]; + } + return false; + } +}; diff --git a/DFS/2305.Fair-Distribution-of-Cookies/2305.Fair-Distribution-of-Cookies_v3.cpp b/DFS/2305.Fair-Distribution-of-Cookies/2305.Fair-Distribution-of-Cookies_v3.cpp new file mode 100644 index 000000000..f8740e76b --- /dev/null +++ b/DFS/2305.Fair-Distribution-of-Cookies/2305.Fair-Distribution-of-Cookies_v3.cpp @@ -0,0 +1,52 @@ +class Solution { + int plan[8]; +public: + int distributeCookies(vector& cookies, int k) + { + sort(cookies.rbegin(), cookies.rend()); + int n = cookies.size(); + + int left = 1, right = INT_MAX; + while (left < right) + { + for (int i=0; i& cookies, int limit, int k, int curPerson, int state) + { + if (curPerson == k) + { + return state == 0; + } + + for (int subset=state; subset>0; subset=(subset-1)&state) + { + int sum = getSum(cookies, subset); + if (sum > limit) continue; + if (dfs(cookies, limit, k, curPerson+1, state-subset)) + return true; + }; + + return false; + } + + int getSum(vector& cookies, int state) + { + int ret = 0; + for (int i=0; i>i)&1) + ret += 
cookies[i]; + } + return ret; + } +}; diff --git a/DFS/2305.Fair-Distribution-of-Cookies/Readme.md b/DFS/2305.Fair-Distribution-of-Cookies/Readme.md new file mode 100644 index 000000000..7d4fe9c47 --- /dev/null +++ b/DFS/2305.Fair-Distribution-of-Cookies/Readme.md @@ -0,0 +1,17 @@ +### 2305.Fair-Distribution-of-Cookies + +#### 解法1:常规dfs,遍历cookie +通过DFS遍历所有的分配方案。dfs的每一层处理一块cookie,分支考察分配给每个人的方案。总的时间复杂度就是o(k^N). + +#### 解法2:二分+dfs,遍历cookie +我们先二分搜索猜测一个答案t,然后用dfs来寻找是否存在一种分配方案,使得每个人能分到的饼干数量不超过t。最终二分逼近的答案就是所求的最优解。 + +dfs的原理同解法1. 此时,我们可以有很多剪枝策略: +1. 发现任何一个人的饼干总数已经大于t,就返回false +2. 将cookies从大到小排列,尽早排除那些容易溢出的分支。 +3. 如果某块饼干打算分发给某个没有得到饼干的人,那么就不需要平行地尝试分给其他没有得到饼干的人。 + +#### 解法3:二分+dfs,遍历人 +dfs的原理正好相反:每一层处理一个人,对于该人的饼干选配方案就是当前剩余饼干的子集。显然我们可以通过遍历子集的技巧,进行dfs的分支搜索。 + +同样,如果遍历子集时,发现某种分配方案会导致个人的饼干总数已经大于t,就终止这个探索。 diff --git a/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/2328.Number-of-Increasing-Paths-in-a-Grid_v1.cpp b/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/2328.Number-of-Increasing-Paths-in-a-Grid_v1.cpp new file mode 100644 index 000000000..42e676a60 --- /dev/null +++ b/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/2328.Number-of-Increasing-Paths-in-a-Grid_v1.cpp @@ -0,0 +1,39 @@ +using AI3 = array; +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int countPaths(vector>& grid) + { + vectorarray; + int m = grid.size(), n = grid[0].size(); + for (int i=0; i>dp(m, vector(n, 0)); + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; + + LL ret = 0; + for (auto& [v, i, j]: array) + { + LL sum = 0; + for (int k=0; k<4; k++) + { + int x = i+dir[k].first; + int y = j+dir[k].second; + if (x<0||x>=m||y<0||y>=n) continue; + if (grid[x][y] >= grid[i][j]) continue; + sum = (sum + dp[x][y]) % M; + } + sum = (sum + 1) % M; + dp[i][j] = sum; + ret = (ret + sum) % M; + } + + return ret; + + } +}; diff --git a/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/2328.Number-of-Increasing-Paths-in-a-Grid_v2.cpp b/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/2328.Number-of-Increasing-Paths-in-a-Grid_v2.cpp new file mode 100644 index 000000000..90587acd5 --- /dev/null +++ b/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/2328.Number-of-Increasing-Paths-in-a-Grid_v2.cpp @@ -0,0 +1,34 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + LL dp[1000][1000]; +public: + int countPaths(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + LL ret = 0; + for (int i=0; i>& grid, int i, int j) + { + int m = grid.size(), n = grid[0].size(); + if (dp[i][j]!=0) return dp[i][j]; + + dp[i][j] = 1; + vector>dir = {{1,0},{-1,0},{0,1},{0,-1}}; + for (int k=0; k<4; k++) + { + int x = i+dir[k].first; + int y = j+dir[k].second; + if (x<0||x>=m||y<0||y>=n) continue; + if (grid[x][y]>=grid[i][j]) continue; + dp[i][j] = (dp[i][j] + dfs(grid, x, y)) % M; + } + + return dp[i][j]; + } +}; diff --git a/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/Readme.md b/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/Readme.md new file mode 100644 index 000000000..6d7f85b3e --- /dev/null +++ b/DFS/2328.Number-of-Increasing-Paths-in-a-Grid/Readme.md @@ -0,0 +1,12 @@ +### 2328.Number-of-Increasing-Paths-in-a-Grid + +#### 解法1:动态规划 +考虑到所有元素的数目只有1e5,那么我们可以将其按照从小到大排序。然后依次访问这些元素(i,j),查看它四周的格子(x,y)是否有比它小的。是的话,就有```dp[i][j]+=dp[x][y]```,其中dp[i][j]表示以其为结尾的、符合条件的严格递增序列的数目。特别注意,(i,j)本身也可以是一个单元素的序列,所以dp[i][j]要加上1. 
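下面把这个转移写成一段简化示意(示意性质:cells 假设已按格子数值从小到大排好,每个元素是 {值, i, j};dx/dy 表示四个方向的偏移;dp、M、ret 的含义与上方 v1 代码相同):
```cpp
for (auto& [v, i, j] : cells)               // 按数值从小到大依次处理每个格子
{
    long long sum = 1;                      // (i,j) 自身就是一条单元素路径
    for (int k = 0; k < 4; k++)
    {
        int x = i + dx[k], y = j + dy[k];
        if (x < 0 || x >= m || y < 0 || y >= n) continue;
        if (grid[x][y] >= grid[i][j]) continue;   // 只能从严格更小的邻格转移过来
        sum = (sum + dp[x][y]) % M;
    }
    dp[i][j] = sum;
    ret = (ret + sum) % M;                  // 任何格子都可以作为递增路径的结尾
}
```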
+ +最终的答案是将所有dp[i][j]加起来。因为符合条件的严格递增序列可以以任何位置结尾。 + +#### 解法2:DFS +事实上本题不需要排序。我们只需要DFS和记忆化搜索。我们令dfs(i,j)表示以(i,j)为起点的递增序列的个数。我们可以从任意一个位置(i,j)出发,寻找四周比其大的格子(x,y),然后递归调用dfs(x,y),将其结果再加在dfs(i,j)上即可。本题需要记忆化dfs的结果来避免重复调用。 + + +此题和[329.Longest-Increasing-Path-in-a-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/DFS/329.Longest-Increasing-Path-in-a-Matrix)非常类似。 diff --git a/DFS/2597.The-Number-of-Beautiful-Subsets/2597.The-Number-of-Beautiful-Subsets.cpp b/DFS/2597.The-Number-of-Beautiful-Subsets/2597.The-Number-of-Beautiful-Subsets.cpp new file mode 100644 index 000000000..a1fe1ffac --- /dev/null +++ b/DFS/2597.The-Number-of-Beautiful-Subsets/2597.The-Number-of-Beautiful-Subsets.cpp @@ -0,0 +1,29 @@ +class Solution { +public: + int beautifulSubsets(vector& nums, int k) + { + return dfs(0, 0, nums, k) - 1; + } + + int dfs(int cur, int state, vector& nums, int k) + { + if (cur==nums.size()) return 1; + + int flag = 1; + for (int i=0; i>i)&1 && (nums[i]+k==nums[cur] || nums[i]-k==nums[cur])) + { + flag = 0; + break; + } + } + + int choose = dfs(cur+1, state+(1<& nums, int k) + { + unordered_mapcount; + for (int x:nums) + count[x]+=1; + + unordered_map>>Map; + for (auto [val,count]:count) + Map[val%k].push_back({val, count}); + + int ret = 1; + for (auto& [r,arr]: Map) + { + sort(arr.begin(), arr.end()); + + int take = 0, notake = 1; + for (int i=0; i>Map; +public: + int specialPerm(vector& nums) + { + n = nums.size(); + + for (int i=0; i>q)&1) continue; + ret += dfs(i+1, q, state+(1<& words) + { + return words[0].size() + dfs(1, words[0][0]-'a', words[0].back()-'a', words); + } + + // the minimum length to be added if we construct the first i words with start & end + int dfs(int i, int start, int end, vector& words) + { + if (i==words.size()) return 0; + if (memo[i][start][end]!=0) return memo[i][start][end]; + + int a = words[i][0]-'a', b = words[i].back()-'a'; + int len = words[i].size(); + int ret = INT_MAX/2; + + if (start==a && end==b) + { + // it does not matter we put words[i] at the beginning or at the end; + ret = len - (a==b) + dfs(i+1, start, end, words); + } + else + { + // place words[i] at the end + if (end==a) + ret = min(ret, len-1 + dfs(i+1, start, b, words)); + else + ret = min(ret, len + dfs(i+1, start, b, words)); + + // place words[i] at the beginning + if (start==b) + ret = min(ret, len-1 + dfs(i+1, a, end, words)); + else + ret = min(ret, len + dfs(i+1, a, end, words)); + } + + memo[i][start][end] = ret; + return ret; + } +}; diff --git a/DFS/2746.Decremental-String-Concatenation/Readme.md b/DFS/2746.Decremental-String-Concatenation/Readme.md new file mode 100644 index 000000000..7c9a8e64b --- /dev/null +++ b/DFS/2746.Decremental-String-Concatenation/Readme.md @@ -0,0 +1,31 @@ +### 2746.Decremental-String-Concatenation + +考虑到n的数量不大,估计可以暴力搜索。顺次遍历每一个单词,我们只需要考察将其加在已有str的前面还是后面两种决策。这看上去复杂度会有2^50,但是我们事实我们并不需要枚举这么多状态。假设前两个单词{abc,aec},那么这两个单词的拼接方式对于后续的选择而言没有不同,因为都是`a****c`。我们能否压缩长度的关键,其实只需要关注str的第一个和最后一个字符即可。于是我们实际需要枚举的状态最多只有50*26*26种。 + +由此我们可以定义递归函数`int dfs(int i, int start, int end)`,表示the minimum length to be added if we construct the first i words with start & end. 
也就是说,当前i个单词构造出来的str以start开头、end结尾时,我们需要考虑如何使用words[i]:很明显两种方案,放在前面或者放在后面。此时我们就可以根据start/end与words[i]的首尾字符,进行递归处理: +```cpp +int a = words[i][0]-'a', b = words[i].back()-'a'; +// 放后面 +if (end==a) + ret = min(ret, len-1 + dfs(i+1, start, b, words)); +else + ret = min(ret, len + dfs(i+1, start, b, words)); + +// 放前面 +if (start==b) + ret = min(ret, len-1 + dfs(i+1, a, end, words)); +else + ret = min(ret, len + dfs(i+1, a, end, words)); +``` +最终的答案就是初始调用的`words[0].size() + dfs(1, words[0][0], words[0].back())`,因为对于words[0]我们只有唯一的构造形式。 + +另外,我们必然要用记忆化来避免相同参数的dfs重复调用。 + +更新:为了过更严格的case,需要再加一个优化的技巧 +```cpp +if (start==a && end==b) +{ + // it does not matter we put words[i] at the beginning or at the end; + ret = len - (a==b) + dfs(i+1, start, end, words); +} +``` diff --git a/DFS/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty.cpp b/DFS/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty.cpp new file mode 100644 index 000000000..88c46b8f1 --- /dev/null +++ b/DFS/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty.cpp @@ -0,0 +1,48 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + int k; + int beauty = 0; + LL global = 0; +public: + void dfs(int curPos, int picked, int curBeauty, LL ret, vector&count) + { + if (curBeauty > beauty) return; + if (picked > k) return ; + + if (curBeauty == beauty && picked == k) + { + global = (global+ret)%M; + return; + } + + if (curBeauty + accumulate(count.begin()+curPos, count.end(), 0) < beauty) return; + + for (int i=curPos; ik = k; + unordered_mapMap; + for (auto ch: s) + Map[ch]+=1; + + vectorcount; + for (auto [k,v]: Map) + count.push_back(v); + + sort(count.rbegin(), count.rend()); + if (count.size() >& grid) + { + dfs(0, 0, grid); + return global; + } + + void dfs(int cur, int moves, vector>& grid) + { + if (moves >= global) return; + + if (cur==9) + { + global = min(global, moves); + return; + } + + int i = cur/3; + int j = cur%3; + if (grid[i][j]!=0) + { + dfs(cur+1, moves, grid); + return; + } + + for (int x=0; x<3; x++) + for (int y=0; y<3; y++) + { + if (grid[x][y]<=1) continue; + grid[x][y] -= 1; + grid[i][j] += 1; + dfs(cur+1, moves+abs(x-i)+abs(y-j), grid); + grid[x][y] += 1; + grid[i][j] -= 1; + } + } +}; diff --git a/DFS/2850.Minimum-Moves-to-Spread-Stones-Over-Grid/Readme.md b/DFS/2850.Minimum-Moves-to-Spread-Stones-Over-Grid/Readme.md new file mode 100644 index 000000000..282e71043 --- /dev/null +++ b/DFS/2850.Minimum-Moves-to-Spread-Stones-Over-Grid/Readme.md @@ -0,0 +1,13 @@ +### 2850.Minimum-Moves-to-Spread-Stones-Over-Grid + +本题的关键点在于判断出时间复杂度,可以用DFS无脑搜索。 + +假设只有一个空格,需要从其他八个格子转移一个过去,那么就有8^1种可能。 + +假设有两个空格,需要从其他七个格子分别转移一个过去,那么就有7^2种可能。 + +假设有三个空格,需要从其他六个格子分别转移一个过去,那么就有6^3种可能。 + +以此类推,5^4, 4^5, 3^6, 2^7, 1^8,其实数值都不大。 + +所以无脑搜索每个空格的转移策略即可。 diff --git a/DFS/291.Word-Pattern-II/291.Word-Pattern-II.cpp b/DFS/291.Word-Pattern-II/291.Word-Pattern-II.cpp index 05213c83a..5110d5eb0 100644 --- a/DFS/291.Word-Pattern-II/291.Word-Pattern-II.cpp +++ b/DFS/291.Word-Pattern-II/291.Word-Pattern-II.cpp @@ -10,8 +10,7 @@ class Solution { bool dfs(int x, int y, string& pattern, string& s) { - if (x==pattern.size() && y==s.size()) - return true; + if (x==pattern.size()) return y==s.size(); char ch = pattern[x]; if (Map1.find(ch)!=Map1.end()) diff --git a/DFS/3213.Construct-String-with-Minimum-Cost/3213.Construct-String-with-Minimum-Cost.cpp 
b/DFS/3213.Construct-String-with-Minimum-Cost/3213.Construct-String-with-Minimum-Cost.cpp new file mode 100644 index 000000000..71fc75c63 --- /dev/null +++ b/DFS/3213.Construct-String-with-Minimum-Cost/3213.Construct-String-with-Minimum-Cost.cpp @@ -0,0 +1,62 @@ +class TrieNode +{ + public: + TrieNode* next[26]; + int cost; + TrieNode() + { + for (int i=0; i<26; i++) + next[i] = NULL; + cost = -1; + } +}; + +class Solution { + TrieNode* root = new TrieNode(); + vectormemo; +public: + int minimumCost(string target, vector& words, vector& costs) + { + memo = vector(target.size(), -1); + + for (int i=0; inext[ch-'a']==NULL) + node->next[ch-'a'] = new TrieNode(); + node = node->next[ch-'a']; + } + if (node->cost==-1) + node->cost = costs[i]; + else + node->cost = min(node->cost, costs[i]); + } + + int ret = dfs(target, 0); + if (ret == INT_MAX/2) return -1; + else return ret; + } + + int dfs(string& target, int cur) + { + if (cur==target.size()) return 0; + if (memo[cur] != -1) return memo[cur]; + + int ans = INT_MAX/2; + TrieNode* node = root; + for (int i=cur; inext[target[i]-'a']==NULL) + break; + node = node->next[target[i]-'a']; + if (node->cost!=-1) + ans = min(ans, node->cost + dfs(target, i+1)); + } + + memo[cur] = ans; + + return ans; + } +}; diff --git a/DFS/3213.Construct-String-with-Minimum-Cost/Readme.md b/DFS/3213.Construct-String-with-Minimum-Cost/Readme.md new file mode 100644 index 000000000..1efcc2929 --- /dev/null +++ b/DFS/3213.Construct-String-with-Minimum-Cost/Readme.md @@ -0,0 +1,13 @@ +### 3213.Construct-String-with-Minimum-Cost + +此题似乎并没有什么特别好的办法。似乎只能暴力搜索,穷举target里的每一段是否适配某些word。 + +为了高效判定一段字符串是否出现在某些给定的words里,显然我们会先将所有word构造成一棵字典树。将word加入字典树的时候,记得在结尾节点附上该word的cost。如果有多个相同的word,我们只保留最小的cost。 + +我们定义dfs(i)表示target从位置i开始到结尾这段后缀成功分解所能得到的最小代价。我们就有: +```cpp +dfs(i) = min{cost of target[i:j] + dfs(j+1)}; for (int j=i; j>dir; + int memo[501][501][4][2]; +public: + int lenOfVDiagonal(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + int ret = 0; + dir = {{-1,1},{1,1},{1,-1},{-1,-1}}; + + for (int i=0; i=0 && i=0 && j>& grid, int x, int y, int k, int t) + { + if (memo[x][y][k][t]!=0) return memo[x][y][k][t]; + + int m = grid.size(), n = grid[0].size(); + int ret = 1; + + int i = x+dir[k].first, j = y+dir[k].second; + + if (inbound(i,j,m,n) && canContinue(grid[x][y], grid[i][j])) + ret = max(ret, 1 + dfs(grid,i,j,k,t)); + + if (t==1) + { + int kk=(k+1)%4; + i = x+dir[k].first, j = y+dir[k].second; + if (inbound(i,j,m,n) && canContinue(grid[x][y], grid[i][j])) + ret = max(ret, 1 + dfs(grid,i,j,kk,0)); + } + memo[x][y][k][t] = ret; + return ret; + } +}; diff --git a/DFS/3459.Length-of-Longest-V-Shaped-Diagonal-Segment/Readme.md b/DFS/3459.Length-of-Longest-V-Shaped-Diagonal-Segment/Readme.md new file mode 100644 index 000000000..4d9e572bd --- /dev/null +++ b/DFS/3459.Length-of-Longest-V-Shaped-Diagonal-Segment/Readme.md @@ -0,0 +1,9 @@ +### 3459.Length-of-Longest-V-Shaped-Diagonal-Segment + +很常规的深度优先搜索。每个格子、每个方向只会进入一次。所以最多有`500*500*4=1e6`种状态。再加上有一次转弯的机会,所以2e6种状态是可以遍历和存储下来的。 + +定义dfs(x,y,k,t)表示以k的方向进入(x,y)的格子、且还有t次转弯机会时,还能走的最长路径。如果t==0,那么只能按照k的方向进入下一个(i1,j1);否则还可以考察按照k+1的方向进入下一个(i2,j2). 
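把上面这段转移写成代码示意(这里假设 grid、dir、inbound、canContinue、memo 都可以直接访问,含义与上方完整实现一致;省略了记忆化命中时的提前返回,仅作示意):
```cpp
int dfs(int x, int y, int k, int t)
{
    int ret = 1;                                    // (x,y) 本身计入长度
    int i = x + dir[k].first, j = y + dir[k].second;
    if (inbound(i, j) && canContinue(grid[x][y], grid[i][j]))
        ret = max(ret, 1 + dfs(i, j, k, t));        // 沿原方向 k 继续走
    if (t == 1)
    {
        int kk = (k + 1) % 4;                       // 用掉唯一一次转弯机会
        i = x + dir[kk].first, j = y + dir[kk].second;
        if (inbound(i, j) && canContinue(grid[x][y], grid[i][j]))
            ret = max(ret, 1 + dfs(i, j, kk, 0));
    }
    return memo[x][y][k][t] = ret;
}
```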
+ +注意进入的下一个各自(i,j)和(x,y)要满足数值上的约束,否则即可停止往下搜索。 + +此外,本题的记忆化根据四个参数进行记忆化也是必须的。 diff --git a/DFS/351.Android-Unlock-Patterns/351.Android-Unlock-Patterns.cpp b/DFS/351.Android-Unlock-Patterns/351.Android-Unlock-Patterns.cpp index 040ab40e3..78a9af0d7 100644 --- a/DFS/351.Android-Unlock-Patterns/351.Android-Unlock-Patterns.cpp +++ b/DFS/351.Android-Unlock-Patterns/351.Android-Unlock-Patterns.cpp @@ -1,26 +1,26 @@ class Solution { int count = 0; int m,n; + int visited[3][3]; + vector>dir = {{1,0},{-1,0},{0,1},{0,-1},{1,1},{-1,1},{1,-1},{-1,-1},{-1,2},{1,2},{-2,1},{2,1},{-1,-2},{1,-2},{-2,-1},{2,-1}}; public: int numberOfPatterns(int m, int n) { this->m = m; this->n = n; - auto visited = vector>(3, vector(3,0)); - + for (int i=0; i<3; i++) for (int j=0; j<3; j++) { visited[i][j] = 1; - dfs(i,j,1,visited); + dfs(i,j,1); visited[i][j] = 0; } return count; } - void dfs(int x, int y, int r, vector>&visited) - { - auto dir = vector>({{1,0},{-1,0},{0,1},{0,-1},{1,1},{-1,1},{1,-1},{-1,-1},{-1,2},{1,2},{-2,1},{2,1},{-1,-2},{1,-2},{-2,-1},{2,-1}}); + void dfs(int x, int y, int r) + { if (r>=m && r<=n) count++; if (r>n) return; @@ -34,7 +34,7 @@ class Solution { if (visited[i][j] == 0) { visited[i][j] = 1; - dfs(i,j,r+1,visited); + dfs(i,j,r+1); visited[i][j] = 0; } else @@ -45,7 +45,7 @@ class Solution { continue; if (visited[i][j]==1) continue; visited[i][j] = 1; - dfs(i,j,r+1,visited); + dfs(i,j,r+1); visited[i][j] = 0; } } diff --git a/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths/3593.Minimum-Increments-to-Equalize-Leaf-Paths.cpp b/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths/3593.Minimum-Increments-to-Equalize-Leaf-Paths.cpp new file mode 100644 index 000000000..ce6b54d7a --- /dev/null +++ b/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths/3593.Minimum-Increments-to-Equalize-Leaf-Paths.cpp @@ -0,0 +1,37 @@ +class Solution { + vectoradj[100005]; +public: + pairdfs(int u, int p, vector&cost) { + long long maxPath = 0; + int totalChanged = 0; + vectorpaths; + + for (int v: adj[u]) { + if (v==p) continue; + auto [path, changed] = dfs(v, u, cost); + maxPath = max(maxPath, path); + totalChanged += changed; + paths.push_back(path); + } + + int count = 0; + for (long long p: paths) { + if (p < maxPath) + count++; + } + + return {cost[u]+maxPath, totalChanged + count}; + } + + int minIncrease(int n, vector>& edges, vector& cost) { + for (auto& edge: edges) { + int u = edge[0], v = edge[1]; + adj[u].push_back(v); + adj[v].push_back(u); + } + + auto [_, ret ] = dfs(0, -1, cost); + + return ret; + } +}; diff --git a/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths/Readme.md b/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths/Readme.md new file mode 100644 index 000000000..6f800c71e --- /dev/null +++ b/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths/Readme.md @@ -0,0 +1,5 @@ +### 3593.Minimum-Increments-to-Equalize-Leaf-Paths + +很明显,为了减少increment的操作,我们会将所有root-to-leaf的路径与最长的那条路径对齐。但是我们并不需要显式地先求全局最长的路径,我们只需要用dfs函数将每一棵子树内的所有root-to-leaf路径长度拉齐即可。 + +具体地,对于根节点为u的子树,我们设计dfs返回该子树最终的最长路径maxPath,以及将其所有root-to-leaf的路径拉齐至maxPath所需要的操作数totalChanged. 
我们做后序遍历,先对所有子节点都做一遍dfs。那么以u为根的maxPath就是所有子节点里最长的路径mx加上cost[u];以u为根的totalChanged就是所有子节点的totalChanged之和,再加上需要将路径长度拉高至mx的子路径的个数。 diff --git a/DFS/3615.Longest-Palindromic-Path-in-Graph/3615.Longest-Palindromic-Path-in-Graph.cpp b/DFS/3615.Longest-Palindromic-Path-in-Graph/3615.Longest-Palindromic-Path-in-Graph.cpp new file mode 100644 index 000000000..bb03c45b2 --- /dev/null +++ b/DFS/3615.Longest-Palindromic-Path-in-Graph/3615.Longest-Palindromic-Path-in-Graph.cpp @@ -0,0 +1,46 @@ +class Solution { + vectoradj[14]; + int memo[14][14][1<<14]; + string label; +public: + int dfs(int u, int v, int mask) { + if (memo[u][v][mask]!=-1) return memo[u][v][mask]; + int ret = 0; + for (int u2: adj[u]) { + if (mask&(1<>& edges, string label) { + this->label = label; + for (auto& e: edges) { + adj[e[0]].push_back(e[1]); + adj[e[1]].push_back(e[0]); + } + memset(memo, -1, sizeof(memo)); + + int ret = 1; + for (int u=0; udivisors; + vectorcur; + vectorrets; + int bestDiff = INT_MAX/2; +public: + void dfs(int idx, int n, int k) { + if (k==1) { + cur.push_back(n); + int diff = cur.back()-cur[0]; + if (diff < bestDiff) { + bestDiff = diff; + rets = cur; + } + cur.pop_back(); + return; + } + + for (int i = idx; i minDifference(int n, int k) { + for (int i=1; i*i<=n; i++) { + if (n%i==0) { + divisors.push_back(i); + if (i*i!=n) divisors.push_back(n/i); + } + } + sort(divisors.begin(), divisors.end()); + + dfs(0, n, k); + + return rets; + } +}; diff --git a/DFS/3669.Balanced-K-Factor-Decomposition/Readme.md b/DFS/3669.Balanced-K-Factor-Decomposition/Readme.md new file mode 100644 index 000000000..6af2ba84b --- /dev/null +++ b/DFS/3669.Balanced-K-Factor-Decomposition/Readme.md @@ -0,0 +1,7 @@ +### 3669.Balanced-K-Factor-Decomposition + +用sqrt(n)的时间将n的所有divisors求出来。本题转化为在divisors数组中寻找k个元素使得乘积恰好为n。因为k的个数较小,可以用暴力DFS解决。 + +递归函数`dfs(i,n,k)`表示要将n拆分为k个元素的乘积,当前可以从第i个divisor开始选择。选中某个约数d(编号为j)之后,即可递归处理`dfs(j,n/d,k-1)`.当k=1时,即可记录所选中的约数。 + +注意DFS过程中,当选择不同divisor时,已选中的数组需要有回溯操作。 diff --git a/DFS/473.Matchsticks-to-Square/473.Matchsticks-to-Square_v2.cpp b/DFS/473.Matchsticks-to-Square/473.Matchsticks-to-Square_v2.cpp new file mode 100644 index 000000000..b7e13b405 --- /dev/null +++ b/DFS/473.Matchsticks-to-Square/473.Matchsticks-to-Square_v2.cpp @@ -0,0 +1,38 @@ +class Solution { + bool divide2(vector& sticks, int target) { + int n = sticks.size(); + for(int state = 0; state < (1<>i)&1) cur += sticks[i]; + } + if(cur == target) return true; + } + return false; + } +public: + bool makesquare(vector& matchsticks) { + int total = accumulate(matchsticks.begin(), matchsticks.end(), 0); + if(total % 4 != 0) return false; + + int n = matchsticks.size(); + + for(int state = 0; state < (1<>i)&1) cur += matchsticks[i]; + } + if(cur == total / 2) { + vector v1, v2; + for(int i = 0; i < n; ++i) { + if((state>>i)&1) v1.push_back(matchsticks[i]); + else v2.push_back(matchsticks[i]); + } + + if(divide2(v1, total / 4) && divide2(v2, total / 4)) return true; + } + } + + return false; + } +}; diff --git a/DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets.cpp b/DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets_v1.cpp similarity index 100% rename from DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets.cpp rename to DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets_v1.cpp diff --git a/DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets_v2.cpp 
b/DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets_v2.cpp new file mode 100644 index 000000000..b3c90ccd8 --- /dev/null +++ b/DFS/698.Partition-to-K-Equal-Sum-Subsets/698.Partition-to-K-Equal-Sum-Subsets_v2.cpp @@ -0,0 +1,25 @@ +class Solution { +public: + bool canPartitionKSubsets(vector& nums, int k) + { + int n = nums.size(); + int sum = accumulate(nums.begin(), nums.end(), 0); + if (sum%k!=0) return false; + int target = sum / k; + + vectordp(1<>i)&1) && (dp[state]+nums[i] <= target)) + dp[state + (1<q; for (int i=0; i0 && points[q.front()][0] < points[i][0]-k) + while (q.size()>0 && points[q.front()][0] < points[i][0]-k) { q.pop_front(); + } - if (q.size() > 0) + if (q.size() > 0) { ret = max(ret, -points[q.front()][0]+points[q.front()][1] + points[i][0]+points[i][1]); - - while (q.size()>0 && -points[q.back()][0]+points[q.back()][1] < -points[i][0]+points[i][1]) + } + + while (q.size()>0 && -points[q.back()][0]+points[q.back()][1] < -points[i][0]+points[i][1]) { q.pop_back(); + } + q.push_back(i); + } return ret; - } }; diff --git a/Deque/1499.Max-Value-of-Equation/Readme.md b/Deque/1499.Max-Value-of-Equation/Readme.md index ebeb9fe22..37ab1180c 100644 --- a/Deque/1499.Max-Value-of-Equation/Readme.md +++ b/Deque/1499.Max-Value-of-Equation/Readme.md @@ -1,5 +1,5 @@ ### 1499.Max-Value-of-Equation -如果我们固定j点,那么原题就是求```max{yi+yj+xj-xi} = max{-xi+yi} + xj+yj ```.也就是要在|xi-xj|& chargeTimes, vector& runningCosts, long long budget) + { + vector>robots; + int n = chargeTimes.size(); + + LL left = 0, right = n; + while (left < right) + { + LL mid = right-(right-left)/2; + if (isOK(mid, chargeTimes, runningCosts, budget)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool isOK(LL k, vector& chargeTimes, vector& runningCosts, long long budget) + { + LL n = chargeTimes.size(); + LL sum = 0; + multisetSet; + + for (int i=0; i=k-1) + { + LL ret = *(Set.rbegin()) + (LL)k * sum; + if (ret <= budget) return true; + sum -= runningCosts[i-k+1]; + Set.erase(Set.find(chargeTimes[i-k+1])); + } + } + + return false; + } +}; diff --git a/Deque/2398.Maximum-Number-of-Robots-Within-Budget/2398.Maximum-Number-of-Robots-Within-Budget_v2.cpp b/Deque/2398.Maximum-Number-of-Robots-Within-Budget/2398.Maximum-Number-of-Robots-Within-Budget_v2.cpp new file mode 100644 index 000000000..fc037f313 --- /dev/null +++ b/Deque/2398.Maximum-Number-of-Robots-Within-Budget/2398.Maximum-Number-of-Robots-Within-Budget_v2.cpp @@ -0,0 +1,129 @@ +using LL = long long; + +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = 
max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +class Solution { + SegTreeNode* root; +public: + int maximumRobots(vector& chargeTimes, vector& runningCosts, long long budget) + { + vector>robots; + int n = chargeTimes.size(); + + root = new SegTreeNode(0, n-1, 0); + for (int i=0; iupdateRange(i, i, chargeTimes[i]); + + LL left = 0, right = n; + while (left < right) + { + LL mid = right-(right-left)/2; + if (isOK(mid, chargeTimes, runningCosts, budget)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool isOK(LL k, vector& chargeTimes, vector& runningCosts, long long budget) + { + LL n = chargeTimes.size(); + LL sum = 0; + + for (int i=0; i=k-1) + { + LL ret = root->queryRange(i-k+1, i) + (LL)k * sum; + if (ret <= budget) return true; + sum -= runningCosts[i-k+1]; + } + } + + return false; + } +}; diff --git a/Deque/2398.Maximum-Number-of-Robots-Within-Budget/2398.Maximum-Number-of-Robots-Within-Budget_v3.cpp b/Deque/2398.Maximum-Number-of-Robots-Within-Budget/2398.Maximum-Number-of-Robots-Within-Budget_v3.cpp new file mode 100644 index 000000000..14eebf7da --- /dev/null +++ b/Deque/2398.Maximum-Number-of-Robots-Within-Budget/2398.Maximum-Number-of-Robots-Within-Budget_v3.cpp @@ -0,0 +1,47 @@ +using LL = long long; +class Solution { +public: + int maximumRobots(vector& chargeTimes, vector& runningCosts, long long budget) + { + vector>robots; + int n = chargeTimes.size(); + + LL left = 0, right = n; + while (left < right) + { + LL mid = right-(right-left)/2; + if (isOK(mid, chargeTimes, runningCosts, budget)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool isOK(LL k, vector& chargeTimes, vector& runningCosts, long long budget) + { + LL n = chargeTimes.size(); + LL sum = 0; + dequedq; + + for (int i=0; i=k-1) + { + LL ret = chargeTimes[dq.front()] + (LL)k * sum; + if (ret <= budget) return true; + sum -= runningCosts[i-k+1]; + + } + } + + return false; + } +}; diff --git a/Deque/2398.Maximum-Number-of-Robots-Within-Budget/Readme.md b/Deque/2398.Maximum-Number-of-Robots-Within-Budget/Readme.md new file mode 100644 index 000000000..e19b01e58 --- /dev/null +++ b/Deque/2398.Maximum-Number-of-Robots-Within-Budget/Readme.md @@ -0,0 +1,7 @@ +### 2398.Maximum-Number-of-Robots-Within-Budget + +很明显,budget越多,能够跑的机器就越多,这是一个单调的过程。所以我们用二分法来试探最大的机器数目。 + +如果确定了一个长度,我们就跑一遍滑窗,需要计算每个时刻滑窗内的元素之和与元素最大值。显然,用单调deque是解sliding window maximum的固定套路,时间复杂度是o(n)。 + +此题如果用一个有序容器(比如说multiset),结果会超时。另外,用线段树也是一个可行的选择。 diff --git a/Deque/2762.Continuous-Subarrays/2762.Continuous-Subarrays.cpp b/Deque/2762.Continuous-Subarrays/2762.Continuous-Subarrays.cpp new file mode 100644 index 000000000..072185152 --- /dev/null +++ b/Deque/2762.Continuous-Subarrays/2762.Continuous-Subarrays.cpp @@ -0,0 +1,36 @@ +using LL = long long; +class Solution { +public: + long long continuousSubarrays(vector& nums) + { + int n = nums.size(); + + dequedq1; + dequedq2; + + int i = 0; + LL ret = 0; + for (int j=0; j nums[j]) + dq2.pop_back(); + dq2.push_back(j); + + while (!dq1.empty() && !dq2.empty() && 
nums[dq1.front()]-nums[dq2.front()] > 2) + { + if (!dq1.empty() && dq1.front() <= i) + dq1.pop_front(); + if (!dq2.empty() && dq2.front() <= i) + dq2.pop_front(); + i++; + } + ret += LL(j-i+1); + } + + return ret; + } +}; diff --git a/Deque/2762.Continuous-Subarrays/Readme.md b/Deque/2762.Continuous-Subarrays/Readme.md new file mode 100644 index 000000000..27022ba1e --- /dev/null +++ b/Deque/2762.Continuous-Subarrays/Readme.md @@ -0,0 +1,5 @@ +### 2762.Continuous-Subarrays + +这是一个很常见的滑动窗口的题。总的规律是,窗口越长,越不容易满足条件。所以如果我们固定了左端点i,那么可以找到一个最远的右端点j使得[i:j]满足条件。那么以i为左端点的合法subarray的个数就是`j-i+1`.此后,我们必然只能移动左端点至i+1,而右端点必然也需要单调右移。 + +在窗口滑动的过程中,我们需要满足“最大值与最小值”之差不大于2. 显然我们用两个双端队列就能做到实时维护滑窗的最大值和最小值。 diff --git a/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II/2969.Minimum-Number-of-Coins-for-Fruits-II.cpp b/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II/2969.Minimum-Number-of-Coins-for-Fruits-II.cpp new file mode 100644 index 000000000..1a122f4c4 --- /dev/null +++ b/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II/2969.Minimum-Number-of-Coins-for-Fruits-II.cpp @@ -0,0 +1,29 @@ +class Solution { + int dp[100005][2]; +public: + int minimumCoins(vector& prices) + { + int n = prices.size(); + prices.insert(prices.begin(), 0); + dp[1][0] = INT_MAX/2; + dp[1][1] = prices[1]; + + dequedq; + dq.push_back(1); + + for (int i=2; i<=n; i++) + { + dp[i][1] = min(dp[i-1][0], dp[i-1][1]) + prices[i]; + while (!dq.empty() && dq.front()*2= dp[i][1]) + dq.pop_back(); + dq.push_back(i); + } + + return min(dp[n][0], dp[n][1]); + + } +}; diff --git a/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II/Readme.md b/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II/Readme.md new file mode 100644 index 000000000..10e3dbfe6 --- /dev/null +++ b/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II/Readme.md @@ -0,0 +1,11 @@ +### 2969.Minimum-Number-of-Coins-for-Fruits-II + +对于前i件物品而言,我们令dp[i][0]表示第i个水果不付款的最小代价,dp[i][1]表示第i个水果付款的最小代价。显然,我们容易得出 +```cpp +dp[i][1] = min(dp[i-1][0], dp[i-1][1]) + prices[i] +``` +那么对于dp[i][0]而言,第i个水果不用付款,必然是因为某第j个水果付款的缘故(需要满足`2*j>=i`)。这样的j可能有多个 +```cpp +dp[i][0] = min(dp[i-1][j]) j=(i+1)/2, (i+1)/2+1, ..., i-1. +``` +显然,这是求一个滑动窗口的最小值,使用双端队列deque的套路:我们维护一个递增的deque,里面盛装的是dp[x][1]的值。当队首元素不满足`2*j>=i`时就不断弹出,最终队首元素就是合法滑窗内的最小值,即给dp[i][0]赋值。然后将dp[i][1]入队,并将所有队尾元素大于等于dp[i][1]的都弹出,这是因为它们在数值大小或序列先后上都不及第i物品。 diff --git a/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K.cpp b/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K.cpp new file mode 100644 index 000000000..5eca1e201 --- /dev/null +++ b/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K.cpp @@ -0,0 +1,43 @@ +class Solution { +public: + int countPartitions(vector& nums, int k) { + int n = nums.size(); + long M = 1e9+7; + nums.insert(nums.begin(), 0); + + vectordp(n+1); + vectorpresum(n+1); + dp[0] = 1; + presum[0] = 1; + + dequedq1; + dequedq2; + int left = 1; + for (int i=1; i<=n; i++) { + int x = nums[i]; + while (!dq1.empty() && nums[dq1.back()]x) { + dq2.pop_back(); + } + dq2.push_back(i); + + while (left <= i && nums[dq1.front()] - nums[dq2.front()] > k) { + if (dq1.front()==left) dq1.pop_front(); + if (dq2.front()==left) dq2.pop_front(); + left++; + } + // Any valid parition that ends at left-1, left, left+1, ..., i-1 is good. 
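+            // i.e. dp[i] = dp[left-1] + dp[left] + ... + dp[i-1], which the prefix sums below give in O(1)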
+ + dp[i] = presum[i-1] - (left>=2?presum[left-2]:0); + presum[i] = presum[i-1] + dp[i]; + + dp[i] = (dp[i] + M) % M; + presum[i] = (presum[i] + M) % M; + } + + return dp[n]; + } +}; diff --git a/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K/Readme.md b/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K/Readme.md new file mode 100644 index 000000000..13958f4b7 --- /dev/null +++ b/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K/Readme.md @@ -0,0 +1,11 @@ +### 3578.Count-Partitions-With-Max-Min-Difference-at-Most-K + +我们很容易想到令dp[i]表示以i为最后一段区间的结尾,可以得到的切割方案。此时我们需要考虑最后一段的起点位置j可以在哪里。 + +题目条件中“区间最大值与区间最小值的差”,很容易提示我们可以用deque来分别求区间的最大值和最小值。更具体地,当我们从nums[0]开始,不断加入元素,直至将nums[i]纳入区间时,我们可以得到此时区间的最大值a和最小值b:如果a-b>k,那么意味着区间太长,起点位置应该往右移动。这是因为区间越大,就越容易得到更大的a和更小的b,必然有更大的差值。区间越小,a-b的值就会越小,而且这是一个单调的过程。于是我们调整左端点j右移,每移动一次的过程中,我们查看一下nums[j]的退出是否会影响保存区间最大值和最小值的两个deque(因为我们知道当前最大值或最小值必然是deque的首元素)。直至我们将左端点移动到L的位置,意味着区间[L:i]恰好满足最大值与最小值之差小于k。 + +以上说明区间[L:i]是一个合法的切分,同理更小的区间[L+1:i],[L+2:i]...都是满足条件的切分。既然确定了最后一个区间的切法,那么就有`dp[i] = dp[L-1]+dp[L]+dp[L+1]...+dp[i-1]` 显然,我们会用一个DP的前缀和数组presum来辅助,即`dp[i] = presum[i-1]-presum[L-2]`. + +记得得到上述的dp[i]之后,就可以同样更新presum[i]。 + +以1-index考虑的话,最终的结果就是dp[n]。 diff --git a/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/862.Shortest-Subarray-with-Sum-at-Least-K_v2.cpp b/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/862.Shortest-Subarray-with-Sum-at-Least-K_v2.cpp index bfd952ca1..509d46a7f 100644 --- a/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/862.Shortest-Subarray-with-Sum-at-Least-K_v2.cpp +++ b/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/862.Shortest-Subarray-with-Sum-at-Least-K_v2.cpp @@ -1,25 +1,29 @@ +using LL = long long; class Solution { public: - int shortestSubarray(vector& A, int K) + int shortestSubarray(vector& nums, int k) { - int N = A.size(); - vectorpresum(N+1,0); - for (int i=0; iq; - int result = INT_MAX; - for (int i=0; i<=N; i++) + int n = nums.size(); + vectorpresum(n+1); + for (int i=0; idq; + for (int i=0; i<=n; i++) { - while (q.size()>0 && presum[q.front()]+K<=presum[i]) - { - result = min(result,i-q.front()); - q.pop_front(); + while (!dq.empty() && presum[dq.back()] >= presum[i]) + dq.pop_back(); + + while (!dq.empty() && presum[i]-presum[dq.front()] >= k) { + ret = min(ret, i-dq.front()); + dq.pop_front(); } - while (q.size()>0 && presum[q.back()]>=presum[i]) - q.pop_back(); - q.push_back(i); + + dq.push_back(i); } - return result==INT_MAX? -1:result; - + + if (ret == INT_MAX) return -1; + else return ret; } }; diff --git a/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/Readme.md b/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/Readme.md index 37505fedb..9d8d26427 100644 --- a/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/Readme.md +++ b/Deque/862.Shortest-Subarray-with-Sum-at-Least-K/Readme.md @@ -20,16 +20,14 @@ 3.在map删除所有大于preSum[i]的键 ``` -#### 解法2 -上述的解法复杂度是o(NlogN),但实际上还有更好的o(N)的解法. +#### 解法2:单调队列 +上述的解法复杂度是o(NlogN),但实际上还有更好的o(N)的解法.我们基于nums的前缀和数组presum,维护一个双端队列q,保持队列里面的元素是递增的。我们每处理一个新的presum[i],希望在队列里查看最近的j,使得`presum[i]-presum[j]>=k`. 显然我们希望j的位置尽量靠后,同时presum[j]的数值尽量小。 -我们维护一个双端队列q,里面存储的q[j]表示的是一个递增的index的序列.同时要保证presum[q[j]]也是递增的.是不是有点绕? 
+我们假想,presum的前若干个元素本身就是递增的,那么我们就可以照单全收都放入deque里面。此时如果新元素presum[i]比队尾元素(记做j)要小,那么我们就可以把队尾元素j去掉。这是因为从此以后,presum[j]都不会是最优解所对应的区间左端点。考察上面式子的被减数,这个presum[j]相比于presum[i]而言既“老”又“大”,选j永远不如选i。 -假设我们现在处理A[i],其对应的前缀和是presum[i],那么我们想在这个队列里面找到一个位置j,恰好使得```presum[q[j]]+K<=presum[i]```,那么队列中的q[0]~q[j]这些index都是满足at least K条件的位置,我们可以找其中最大的一个,比如说q[j'],就能使得subarray长度i-q[j']是最小的.接下来的操作很重要,我们可以将q[0]到q[j']都从队列前端弹出.因为以后的i会更大,如果它在队列中找到的满足at least K条件的左边界位置比q[j']小的话,不会比已经得到的result更好.所以任何早于q[j']的队列元素对以后的搜索都没有帮助. +同时针对新加入的presum[i],我们考察队首元素(也记做j),观察是否满足`presum[i]-presum[j]>=k`。如果是的话,显然`[j+1,i]`就是一个合法的解。注意,此时我们就可以将j弹出了。因为我们不需要j再与其他位置(指i之后的)匹配合法的区间了,因为即使存在,那样的区间长度也会更长。 -接下来,我们需要将presum[i]的信息加入这个队列.我们的策略是不断在后端弹出元素,直到```presum[q.back()]Map1; + unordered_mapcount; + +public: + Encrypter(vector& keys, vector& values, vector& dictionary) + { + + for (int i=0; iencrypt(word1); + * int param_2 = obj->decrypt(word2); + */ diff --git a/Design/2227.Encrypt-and-Decrypt-Strings/Readme.md b/Design/2227.Encrypt-and-Decrypt-Strings/Readme.md new file mode 100644 index 000000000..cca42343f --- /dev/null +++ b/Design/2227.Encrypt-and-Decrypt-Strings/Readme.md @@ -0,0 +1,5 @@ +### 2227.Encrypt-and-Decrypt-Strings + +本题的关键在于解码的时候会遇到一对多的反映射,极有可能需要递归分支来处理。其实此题的巧解在于给出的dictionary规模非常小,我们可以将其加密之后与给出的word2进行比较。也就是说,有多少candidates加密之后是word2,那么就意味着word2解密之后有多少是在candidates里面。 + +特别注意,题中保证了所有的word1都能被加密,但是不保证所有的candidate都能够被加密。 diff --git a/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v1.cpp b/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v1.cpp new file mode 100644 index 000000000..98f90b043 --- /dev/null +++ b/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v1.cpp @@ -0,0 +1,48 @@ +class Solution { +public: + int totalSteps(vector& nums) + { + int n = nums.size(); + list List; + unordered_map::iterator>idx2iter; + for (int i=0; iq; + for (int i=n-1; i>=1; i--) + if (nums[i-1]>nums[i]) + q.push(i); + + int step = 0; + while (!q.empty()) + { + int len = q.size(); + vectortemp; + while (len--) + { + int i = q.front(); + q.pop(); + + auto iter = idx2iter[i]; + if (next(iter)!=List.end() && (temp.empty() || *next(iter)!=temp.back())) + { + temp.push_back(*next(iter)); + } + + List.erase(iter); + } + + for (int idx: temp) + { + auto iter = idx2iter[idx]; + if (iter!=List.begin() && nums[*prev(iter)] > nums[idx]) + q.push(idx); + } + step++; + } + return step; + } +}; diff --git a/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v2.cpp b/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v2.cpp new file mode 100644 index 000000000..3937b491c --- /dev/null +++ b/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v2.cpp @@ -0,0 +1,47 @@ +class Solution { +public: + int totalSteps(vector& nums) + { + int n = nums.size(); + vectornext(n); + for (int i=0; iremoved(n); + + queue>q; + for (int i=n-1; i>=1; i--) + { + if (nums[i-1]>nums[i]) + q.push({i-1, i}); + } + + int step = 0; + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + auto [l,r] = q.front(); + int r0 = r; + q.pop(); + + if (removed[l]) continue; + // if (removed[r]) continue; + + removed[r] = 1; + + int r2 = next[r]; + while (r2!=n && removed[r2]) + r2 = next[r2]; + next[r] = r2; + + if (r2!=n && nums[l]>nums[r2]) + q.push({l,r2}); + } + + step++; + } + + return step; + } +}; diff --git 
a/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v3.cpp b/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v3.cpp new file mode 100644 index 000000000..c64bb311f --- /dev/null +++ b/Design/2289.Steps-to-Make-Array-Non-decreasing/2289.Steps-to-Make-Array-Non-decreasing_v3.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + int totalSteps(vector& nums) + { + int n = nums.size(); + vectorcount(n); + int ret = 0; + stackStack; + for (int i=n-1; i>=0; i--) + { + int temp = 0; + while (!Stack.empty() && nums[i]>nums[Stack.top()]) + { + temp = max(temp+1, count[Stack.top()]); + Stack.pop(); + } + + count[i] = temp; + Stack.push(i); + ret = max(ret, count[i]); + } + return ret; + } +}; diff --git a/Design/2289.Steps-to-Make-Array-Non-decreasing/Readme.md b/Design/2289.Steps-to-Make-Array-Non-decreasing/Readme.md new file mode 100644 index 000000000..a5d381372 --- /dev/null +++ b/Design/2289.Steps-to-Make-Array-Non-decreasing/Readme.md @@ -0,0 +1,75 @@ +### 2289.Steps-to-Make-Array-Non-decreasing + +本题的关键点是,如果某个位置i满足条件nums[i-1]>nums[i]需要被删除,那么导致的后果就是,在下一个回合,如果i后面的那个元素j(不见得是i+1,有可能i+1在这个回合已经被删除了)满足```nums[i-1]>nums[j]```的话,那么j会在“下一个回合”删除。那么如果确保j不是在这个回合就已经被删除了的呢?我们只需要倒序遍历。 + +举个例子,如下图,如果元素j被判定在本回合不会被删除,继续往前遍历,且i+1,i+2,...,j-1这些元素也在本回合删除,那么在考察元素i的时候就有next(i) = j,那么我们就可以安心地把j作为“下一个回合”待删除的对象。 +``` +i-1, i, i+1, i+2, ..., j-1, j + X X X X O +``` +以上的关键就是如何高效地维护next和prev,即如何快速某个元素后面一个/前面一个尚未被删除的元素(或者说idx)。 + +#### 解法1:暴力模拟 +和```LRU Cache```和```LFU Cache```一样,将链表和关于“元素->链表地址”的Hash结合起来用,是一个大杀器,可以保证在o(1)时间内的查找、删除。本题中,我们定义 +```cpp +list List; +unordered_map::iterator>idx2iter; +``` +链表List里面初始节点是一串编号{0,1,2,...,n-1},idx2iter则代表着链表每个节点的指针。 + +我们从后往前遍历,如果某个idx满足被删除的条件,令iter表示该idx的指针,那么就意味着next(iter)直接就是下一个节点的指针(无论两者之间已经删除了多少结果)。我们将```*next(iter)```这个后续编号放入一个candidates集合里,此时可以安心的将iter本身删除,而List的数据结构会自动将前后节点“接合”在一起。 + +我们倒序走完一遍后,剩下的candidates里面的编号,还需要再考察一遍是否满足```nums[*prev(idx2iter[idx])] > nums[idx]```,将那些符合条件的再进行下一遍的倒序遍历。 + +显然,这种结构就是层级遍历的BFS。答案就是看走了几个回合。 + +#### 解法2:高效模拟 +在解法1中我们考察的是点,所以需要依靠List和Hash来额外维护next和prev。一个更好的解法是考察一对pair。同时用removed数组来标记节点的删除,而不用List。 + +我们在层级遍历BFS的过程中,加入的元素记做{l,r}表示此时一对相邻的编号且满足nums[l]>nums[r]. 那么在处理这对pair的时候,我们会标记removed[r]=1,同时根据之前的思路,我们要找到r此时的右邻元素编号next[r]。但是这里有一个问题,next[r]可能恰好在这个回合里面被删除了(因为我们是倒序遍历的),且此时我们必须找到再右邻的元素(不能放弃)。显然我们需要不断跳转来寻找: +```cpp +r2 = r; +while (r2!=n && removed[r2]) + r2 = next[r2]; +``` +递归结束后所找到的r2肯定是当前回合未被删除的,如果满足大小关系,那么[l, r2]就是下一个回合需要考察的pair。记得别忘了更新```next[r] = r2```,这样能加快今后的跳转效率。 + +此外还需要特别说明的是,虽然在这个回合里面,我们已经判定[l,r2]是下一个回合需要考察的pair,但注意到编号l的元素在本回合中尚未被遍历到(因为它是r左边的元素)。等到了下一个回合时,有可能我们发现l已经被删除了,那么此时的这个pair就作废了,跳过即可。但这没有关系,不意味着我们漏掉了r2,因为r2可能还会因为和其他的l2配对在一起。 + +需要提醒的是,这两种模拟的算法都是o(n),因为每删除一个数字,只会用o(1)引进一个新的candidate。 + +#### 解法3:单调栈 +我们从右往左遍历元素,维护一个递减的单调栈。任何一个新元素M,如果它大于栈顶的若干个元素,那么这些元素(包括之前被这些元素弹出的元素)本质上都是因为M而退栈。这个性质与本题的题意非常类似:对于M而言,它右邻的、连续的比M小的元素,都会被M所“吃掉”。 + +我们令count[i]表示元素i吃掉它所“影响”的元素(即i右邻的、连续的比i小的元素)需要多少步。我们画这样一张图模拟单调栈,横轴表示先后顺序,纵轴表示大小关系。 +``` + q +i + p + k (..P..) + j (..K..) + (..J..) 
+``` +从右往左看,先入栈的是q;然后是一系列(..P..),但它们接着会因为p的入栈而弹出;然后是一系列(..K..),同样它们会因为k的入栈而弹出。再接下来是(..J..)的入栈,随后因为j的入栈而弹出。当考察元素i的时候,栈内从顶至底是[j,k,p,q] + +此时我们考虑如何计算count[i],即将[j, q-1]范围内的元素都吃掉需要花多少步。我们知道,最后一个被i吃掉的元素一定是p,在此之前,(..P..)已经被p吃掉,所需要的步数就是count[p];同时[j,p-1]这部分已经被i吃掉,所需要的步数我们记做f(k),因为k是这个区间里的最大值,也就是最后一个被吃掉的。所以我们就有一个重要的结论: +``` +count[i] = max(f(k)+1, count[p]) +``` +怎么解释这个公式?总的来说,p有两种被吃掉的途径。如果f(k)比较大,那么count[p]耗完之后还需要等若干个回合,等f(k)结束之后,p此时才能与i相邻,故需要再加一步将p吃掉。如果count[p]较大,那么f(k)耗完之后,p已经与i相邻了,故p被i吃掉的这个步骤可以早于count[p],故总的回合数的瓶颈依然是count[p]。 + +那么上式里面的f(k)又从哪来呢?其实类似地发现: +``` +f(k) = max(f(j)+1, count[k]) +``` +于是我们看出来,f()的计算是一个递归的过程。对于count[i],我们需要依次得到f(j),f(k),f(p),而j,k,p也就是在单调栈中被i弹出的元素。最终count[i]也就是f(p)。所以我们利用退栈的过程更新f +```cpp +int f = 0; +while (!Stack.empty() && nums[i]>nums[Stack.top()]) +{ + f = max(f+1, count[Stack.top()]); + Stack.pop(); +} +count[i] = f; +``` +因为最终是一个非递减序列,意味着在原序列里,任意一个元素i都需要将右邻的、连续的比i小的元素都吃掉。所以最终的答案就是所有count[i]里最大的一个值。 diff --git a/Design/2296.Design-a-Text-Editor/2296.Design-a-Text-Editor_v1.cpp b/Design/2296.Design-a-Text-Editor/2296.Design-a-Text-Editor_v1.cpp new file mode 100644 index 000000000..84e0a47db --- /dev/null +++ b/Design/2296.Design-a-Text-Editor/2296.Design-a-Text-Editor_v1.cpp @@ -0,0 +1,80 @@ +class TextEditor { + listt; + list::iterator iter; +public: + TextEditor() { + iter = t.begin(); + } + + void addText(string text) + { + for (auto ch: text) + t.insert(iter, ch); + } + + int deleteText(int k) + { + int ret = 0; + while (iter!=t.begin() && k>0) + { + auto iter2 = prev(iter); + t.erase(iter2); + k--; + ret++; + } + return ret; + } + + string cursorLeft(int k) + { + while (iter!=t.begin() && k>0) + { + iter = prev(iter); + k--; + } + int p = 0; + while (iter!=t.begin() && p<10) + { + iter = prev(iter); + p++; + } + string ret; + for (int i=0; i0) + { + iter = next(iter); + k--; + } + int p = 0; + while (iter!=t.begin() && p<10) + { + iter = prev(iter); + p++; + } + string ret; + for (int i=0; iaddText(text); + * int param_2 = obj->deleteText(k); + * string param_3 = obj->cursorLeft(k); + * string param_4 = obj->cursorRight(k); + */ diff --git a/Design/2296.Design-a-Text-Editor/2296.Design-a-Text-Editor_v2.cpp b/Design/2296.Design-a-Text-Editor/2296.Design-a-Text-Editor_v2.cpp new file mode 100644 index 000000000..a9291983c --- /dev/null +++ b/Design/2296.Design-a-Text-Editor/2296.Design-a-Text-Editor_v2.cpp @@ -0,0 +1,76 @@ +class TextEditor { + stackst1; + stackst2; +public: + TextEditor() { + + } + + void addText(string text) + { + for (auto ch: text) + st1.push(ch); + } + + int deleteText(int k) + { + int ret = min(k, (int)st1.size()); + for (int i=0; iaddText(text); + * int param_2 = obj->deleteText(k); + * string param_3 = obj->cursorLeft(k); + * string param_4 = obj->cursorRight(k); + */ diff --git a/Design/2296.Design-a-Text-Editor/Readme.md b/Design/2296.Design-a-Text-Editor/Readme.md new file mode 100644 index 000000000..c34b28062 --- /dev/null +++ b/Design/2296.Design-a-Text-Editor/Readme.md @@ -0,0 +1,16 @@ +### 2296.Design-a-Text-Editor + +#### 解法1:链表 +本题需要有一种线性的数据结构,能有一个灵活的指针,可以o(1)地删除和添加指向的内部元素,并且依然可以o(1)的时间左移右移。显然这就是链表。 + +C++里面自带链表结构: +```cpp +listList; +``` +该链表的迭代器就是指针 +```cpp +list::iterator iter; +``` + +#### 解法2:两个栈 +我们以指针为界,左边的部分放入一个栈,右边的部分放入一个栈。删除就意味着将弹出左边栈的顶部元素即可。打印的话,因为不超过10个字符,所以从栈顶拿出10个字符暂存下来再放回去就可以了。 diff --git a/Design/642.Design-Search-Autocomplete-System/642.Design-Search-Autocomplete-System.cpp b/Design/642.Design-Search-Autocomplete-System/642.Design-Search-Autocomplete-System.cpp index 9422ade6c..3c9f542d5 100644 --- 
a/Design/642.Design-Search-Autocomplete-System/642.Design-Search-Autocomplete-System.cpp +++ b/Design/642.Design-Search-Autocomplete-System/642.Design-Search-Autocomplete-System.cpp @@ -1,70 +1,88 @@ -class AutocompleteSystem { - unordered_mapMap; - string data; - - struct cmp +class TrieNode +{ + public: + TrieNode* next[128]; + set>top; + TrieNode() { - bool operator()(paira, pairb) - { - if (a.second==b.second) - return a.firstb.second; - } - }; + for (int i=0; i<128; i++) + next[i] = NULL; + } +}; + +class AutocompleteSystem { + TrieNode* root; + string inputStr; + TrieNode* cur; + int flag = 1; public: - AutocompleteSystem(vector sentences, vector times) + AutocompleteSystem(vector& sentences, vector& times) { + root = new TrieNode(); + cur = root; for (int i=0; inext[k] == NULL) + node->next[k] = new TrieNode(); + node = node->next[k]; + + int f = 0; + for (auto iter = node->top.begin(); iter!=node->top.end(); iter=next(iter)) + { + if (iter->second == sentence) + f = iter->first; + } + if (f!=0) node->top.erase({f, sentence}); + node->top.insert(make_pair(f+freq, sentence)); + + add(node, sentence, i+1, freq); } vector input(char c) { + inputStr.push_back(c); + if (c=='#') { - Map[data]++; - data.clear(); + inputStr.pop_back(); + add(root, inputStr, 0, -1); + inputStr = ""; + cur = root; + flag = 1; return {}; } - - data.push_back(c); - priority_queue,vector>,cmp>pq; - - for (auto x:Map) + else if (flag==0) { - string a=x.first; - if (match(data,a)) - { - pq.push({a,Map[a]}); - if (pq.size()>3) pq.pop(); - } + return {}; } - - vectorresults; - while (!pq.empty()) + else if (cur->next[c]==NULL) { - results.push_back(pq.top().first); - pq.pop(); + flag = 0; + return {}; } - reverse(results.begin(),results.end()); - return results; - } - - bool match(string a, string b) - { - for (int i=0; inext[c]; + vectorrets; + for (auto iter = cur->top.begin(); iter!=cur->top.end(); iter=next(iter)) { - if (i>=b.size() || a[i]!=b[i]) - return false; + rets.push_back(iter->second); + if (rets.size()==3) break; } - return true; + return rets; + } + }; /** * Your AutocompleteSystem object will be instantiated and called as such: - * AutocompleteSystem obj = new AutocompleteSystem(sentences, times); - * vector param_1 = obj.input(c); + * AutocompleteSystem* obj = new AutocompleteSystem(sentences, times); + * vector param_1 = obj->input(c); */ diff --git a/Design/642.Design-Search-Autocomplete-System/Readme.md b/Design/642.Design-Search-Autocomplete-System/Readme.md index acbc38e71..7d02029e9 100644 --- a/Design/642.Design-Search-Autocomplete-System/Readme.md +++ b/Design/642.Design-Search-Autocomplete-System/Readme.md @@ -1,30 +1,10 @@ ### 642.Design-Search-Autocomplete-System -如果不用trie来做的话,可以比较简单地用priority_queue来实现对所有候选语句的排序,选择最终未被弹出的三个字符串。 +我们将所有的句子都构建入一棵字典树。对于每个节点(字母),我们都维护一个```句子-频次```的统计。也就是说,注入句子S时,我们将沿途经过的节点都标记上```freq[S]+=1```. 
-核心代码非常简单: -``` - struct cmp - { - bool operator()(paira, pairb) - { - if (a.second==b.second) - return a.firstb.second; - } - }; - priority_queue,vector>,cmp>pq; - for (auto x:Map) - { - string a=x.first; - if (match(data,a)) - { - pq.push({a,Map[a]}); - if (pq.size()>3) pq.pop(); - } - } -``` +当依次读入input时,我们维护从root往下走的指针,移动至该单词对应的节点,读取它的freq取出前三名即可。freq需要使用一个自动排序的数据结构。 +记得当input遇到#时,要将之前input的完整句子,从root开始再次构建入这棵字典树。 -[Leetcode Link](https://leetcode.com/problems/design-search-autocomplete-system) \ No newline at end of file + +[Leetcode Link](https://leetcode.com/problems/design-search-autocomplete-system) diff --git a/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions/1649.Create-Sorted-Array-through-Instructions_DivideConque.cpp b/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions/1649.Create-Sorted-Array-through-Instructions_DivideConque.cpp index 09ed2ea13..aa5bed503 100644 --- a/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions/1649.Create-Sorted-Array-through-Instructions_DivideConque.cpp +++ b/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions/1649.Create-Sorted-Array-through-Instructions_DivideConque.cpp @@ -35,37 +35,36 @@ class Solution { numSmaller[i] += iter-(sorted+a); } - int i=a, j=mid+1, p = 0; - while (i<=mid && j<=b) - { - if (sorted[i]<=sorted[j]) - { - temp[p] = sorted[i]; - i++; - } - else - { - temp[p] = sorted[j]; - j++; - } - p++; - } - while (i<=mid) - { - temp[p] = sorted[i]; - i++; - p++; - } - while (j<=b) - { - temp[p] = sorted[j]; - j++; - p++; - } - for (int i=0; i& nums1, vector& nums2, int diff) + { + int n = nums1.size(); + this->diff = diff; + + vectorarr(n); + for (int i=0; i&arr, int a, int b) + { + if (a==b) return; + int mid = a+(b-a)/2; + helper(arr, a, mid); + helper(arr, mid+1, b); + + int i = a; + for (int j=mid+1; j<=b; j++) + { + while (i<=mid && arr[i] <= arr[j]+diff) + i++; + ret += i-a; + } + + inplace_merge(arr.begin()+a, arr.begin()+mid+1, arr.begin()+b+1); + } +}; diff --git a/Divide_Conquer/2426.Number-of-Pairs-Satisfying-Inequality/Readme.md b/Divide_Conquer/2426.Number-of-Pairs-Satisfying-Inequality/Readme.md new file mode 100644 index 000000000..40c3eddfd --- /dev/null +++ b/Divide_Conquer/2426.Number-of-Pairs-Satisfying-Inequality/Readme.md @@ -0,0 +1,9 @@ +### 2426.Number-of-Pairs-Satisfying-Inequality + +稍微转化一下题意,令`arr[i] = nums1[i]-nums2[i]`,本题即是求在arr里的index pair {i,j},满足`arr[i] <= arr[j]+diff`. 
+ +显然,本题很像求数组里的“正序对”数目,自然解法和求数组“逆序对”数目也一模一样,就是经典的分治法。和315,327,493,1649属于同一类型。 + +分治法的递归思想:将区间分为前后两部分,各自递归处理,且保持有序。这样,对于后半部分的每一个arr[j],我们很容易知道在前半部分有多少arr[i]满足`arr[i] <= arr[j]+diff`(用一个指针滑动即可)。然后,将区间的前后两部分归并排序使整个区间继续保持有序,返回。 + +仔细体会,为什么这种方法可以对任意的arr[j]可以穷举到每一个符合的arr[i]?核心在于任何一个在j之前的i,必然会在某个区间内满足:i在前半区间,j在后半区间。 diff --git a/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/315.Count-of-Smaller-Numbers-After-Self.cpp b/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/315.Count-of-Smaller-Numbers-After-Self.cpp index bbeebc6a3..38457ffee 100644 --- a/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/315.Count-of-Smaller-Numbers-After-Self.cpp +++ b/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/315.Count-of-Smaller-Numbers-After-Self.cpp @@ -30,7 +30,6 @@ class Solution { } // 将两段已经有序的数组段start~mid,mid+1~end合起来排序。 - // 如果写归并排序的code会更快一些。这里就偷懒了,直接用sort函数。 - sort(sortedNums.begin()+start,sortedNums.begin()+end+1); + inplace_merge(sortedNums.begin()+start, sortedNums.begin()+mid+1, sortedNums.begin()+end+1); } }; diff --git a/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/Readme.md b/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/Readme.md index dfc0c5e18..531d9d4d6 100644 --- a/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/Readme.md +++ b/Divide_Conquer/315.Count-of-Smaller-Numbers-After-Self/Readme.md @@ -10,5 +10,7 @@ 最后注意,本题需要三个数组,nums, sortedNums, count。原来的数据存在nums, 归并排序后的数组存在sortedNums, count[i]对应的是nums[i]的 number of smaller elements to the right. +补充:inplace_merge(iter1, iter2, iter3)可以实现[iter1,iter2)和[iter2,iter3)两段区间的归并排序(前提是两段各自有序)。 -[Leetcode Link](https://leetcode.com/problems/count-of-smaller-numbers-after-self) \ No newline at end of file + +[Leetcode Link](https://leetcode.com/problems/count-of-smaller-numbers-after-self) diff --git a/Divide_Conquer/327.Count-of-Range-Sum/ 327.Count-of-Range-Sum.cpp b/Divide_Conquer/327.Count-of-Range-Sum/327.Count-of-Range-Sum.cpp similarity index 51% rename from Divide_Conquer/327.Count-of-Range-Sum/ 327.Count-of-Range-Sum.cpp rename to Divide_Conquer/327.Count-of-Range-Sum/327.Count-of-Range-Sum.cpp index 8a1e644c7..ecc8aed0e 100644 --- a/Divide_Conquer/327.Count-of-Range-Sum/ 327.Count-of-Range-Sum.cpp +++ b/Divide_Conquer/327.Count-of-Range-Sum/327.Count-of-Range-Sum.cpp @@ -1,6 +1,6 @@ class Solution { int result; - long temp[10001]; + long temp[100005]; public: int countRangeSum(vector& nums, int lower, int upper) { @@ -26,34 +26,36 @@ class Solution { result+=p2-p1; } - int i=a, j=mid+1, p = 0; - while (i<=mid && j<=b) - { - if (nums[i]<=nums[j]) - { - temp[p] = nums[i]; - i++; - } - else - { - temp[p] = nums[j]; - j++; - } - p++; - } - while (i<=mid) - { - temp[p] = nums[i]; - i++; - p++; - } - while (j<=b) - { - temp[p] = nums[j]; - j++; - p++; - } - for (int i=0; i& slices) - { - vectorf(k+1,0); // f[i]: the maximum gain by the current round if we take i slices, and we do take the current slice. - vectorg(k+1,0); // g[i]: the maximum gain by the current round if we take i slices, and we do NOT take the current slice. 
+ { + vectordp0(k+1); + vectordp1(k+1); for (int i=st; i<=en; i++) for (int j=min(k,i-st+1); j>=1; j--) { - g[j] = max(g[j], f[j]); - f[j] = g[j-1] + slices[i]; + dp0[j] = max(dp0[j], dp1[j]); + dp1[j] = dp0[j-1] + slices[i]; } - return max(f[k], g[k]); + return max(dp0[k], dp1[k]); } }; diff --git a/Dynamic_Programming/1388.Pizza-With-3n-Slices/Readme.md b/Dynamic_Programming/1388.Pizza-With-3n-Slices/Readme.md index 58c299be1..16e8ff703 100644 --- a/Dynamic_Programming/1388.Pizza-With-3n-Slices/Readme.md +++ b/Dynamic_Programming/1388.Pizza-With-3n-Slices/Readme.md @@ -1,8 +1,10 @@ ### 1388.Pizza-With-3n-Slices -此题的条件和```213.House-Robber-II```非常相似:永远不能取相邻的两个元素;首尾元素认为是相邻的。此外,本题隐含着领一个条件:最多只能取n/3个元素。 +此题的约束和```213.House-Robber-II```非常相似:1.永远不能取相邻的两个元素;2.首尾元素认为是相邻的(即不能同时取首尾两个元素);3.此外本题还有一个条件,恰好取n/3个。 -当然,我们需要验证一下,是不是任意的n/3个互不相邻的元素集合,都可以按照题目中的取数规则来实现。事实上是可以的。例如```1000101010000```,有13个元素。其中1代表我们打算取的数。我们永远先取较为外层的数(随之删去左右相邻的两个零):原序列可以得到```0010101000```,接下来```0101000```,接下来```1000```,最后```0```.虽然严格的证明不太容易,但是这个规律还是容易发现的。 +事实上,满足前述约束123的任何一种选取方案,都可以对应于一种本题里取pizza的方案。反之也是。严格的证明可以参考[这里](https://leetcode.cn/problems/pizza-with-3n-slices/solution/3n-kuai-pi-sa-by-leetcode-solution/)。 + +当然,我们这里可以简单用一个例子来验证一下。例如```100010101000```,有12个元素。其中三分之一(4个1)代表我们打算取的元素,它们是满足约束1,2,3的。我们先拿走最左边的1(随之删去左右相邻的两个零),原序列可以得到```001010100```,接下来```010100```,接下来```100```,最后恰好取光. 因此,本题就是House-Robber-II再加上取n/3个元素的条件。 diff --git a/Dynamic_Programming/1458.Max-Dot-Product-of-Two-Subsequences/Readme.md b/Dynamic_Programming/1458.Max-Dot-Product-of-Two-Subsequences/Readme.md index a71581039..244e33823 100644 --- a/Dynamic_Programming/1458.Max-Dot-Product-of-Two-Subsequences/Readme.md +++ b/Dynamic_Programming/1458.Max-Dot-Product-of-Two-Subsequences/Readme.md @@ -3,6 +3,6 @@ 这是一道典型的双序列型的DP。令dp[i][j]表示A序列的前i个元素、B序列的前j个元素,可以得到的最大点乘结果。突破口就是看A[i]和B[j]。 1. A[i]和B[j]组成一对,那么dp[i][j] = dp[i-1][j-1]+A[i]*B[j]。注意,当dp[i-1][j-1]<0时,该项其实应该略去,即dp[i][j] = A[i]*B[j]. -2. A[i]和B[j]不组成一对,那么这两个元素必然至少有一个不会被用来参与点乘。所以dp[i][j] = min{dp[i-1][j], dp[i][j-1]}. +2. A[i]和B[j]不组成一对,那么这两个元素必然至少有一个不会被用来参与点乘。所以dp[i][j] = max{dp[i-1][j], dp[i][j-1]}. 最终的答案是dp[m][n]. 
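按照上面的思路,可以写出一份示意性的实现(非本仓库的原版代码,函数签名按 LeetCode 常见形式假设为 `maxDotProduct(nums1, nums2)`,即上文的 A、B 两个序列),其中 case 1 对应上文第1条,case 2 对应第2条:

```cpp
class Solution {
public:
    int maxDotProduct(vector<int>& nums1, vector<int>& nums2)
    {
        int m = nums1.size(), n = nums2.size();
        // dp[i][j]: max dot product using the first i elements of nums1
        // and the first j elements of nums2, with at least one pair chosen
        vector<vector<int>> dp(m+1, vector<int>(n+1, INT_MIN/2));
        for (int i=1; i<=m; i++)
            for (int j=1; j<=n; j++)
            {
                int cur = nums1[i-1]*nums2[j-1];
                // case 1: pair nums1[i-1] with nums2[j-1];
                // when dp[i-1][j-1] < 0, just take cur itself
                dp[i][j] = max(cur, dp[i-1][j-1] + cur);
                // case 2: at least one of the two elements does not join the dot product
                dp[i][j] = max(dp[i][j], max(dp[i-1][j], dp[i][j-1]));
            }
        return dp[m][n];
    }
};
```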
diff --git a/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v1.cpp b/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v1.cpp index 84e963611..9340c7dfe 100644 --- a/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v1.cpp +++ b/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v1.cpp @@ -11,10 +11,17 @@ class Solution { for (int k=0; k<=n; k++) dp[i][j][k] = INT_MAX/2; - for (int k=0; k<=n; k++) - dp[0][0][k] = 0; + if (houses[1]!=0) + { + dp[1][1][houses[1]] = 0; + } + else + { + for (int k=1; k<=n; k++) + dp[1][1][k] = cost[1][k-1]; + } - for (int i=1; i<=m; i++) + for (int i=2; i<=m; i++) { if (houses[i]!=0) { diff --git a/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v2.cpp b/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v2.cpp index 0ad79e05c..50eca43e6 100644 --- a/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v2.cpp +++ b/Dynamic_Programming/1473.Paint-House-III/1473.Paint-House-III_v2.cpp @@ -10,11 +10,18 @@ class Solution { for (int j=0; j<=target; j++) for (int k=0; k<=n; k++) dp[i][j][k] = INT_MAX/2; + + if (houses[1]!=0) + { + dp[1][1][houses[1]] = 0; + } + else + { + for (int k=1; k<=n; k++) + dp[1][1][k] = cost[1][k-1]; + } - for (int k=0; k<=n; k++) - dp[0][0][k] = 0; - - for (int i=1; i<=m; i++) + for (int i=2; i<=m; i++) { if (houses[i]!=0) { @@ -28,10 +35,11 @@ class Solution { else dp[i][j][k] = min(dp[i][j][k], dp[i-1][j-1][kk]); } + } } else - { + { for (int j=1; j<=target; j++) { vector>temp; diff --git a/Dynamic_Programming/1473.Paint-House-III/Readme.md b/Dynamic_Programming/1473.Paint-House-III/Readme.md index 02ee623bd..b325e8394 100644 --- a/Dynamic_Programming/1473.Paint-House-III/Readme.md +++ b/Dynamic_Programming/1473.Paint-House-III/Readme.md @@ -7,7 +7,7 @@ 2. 当```house[i]==0```,说明第i个房子可以任意喷涂k=1,2,..,n,记得加上喷涂成本. 
同理,遍历前一个房子的颜色kk。如果kk与k相同,那么第i个房子和前面的房子可以合并为一个block,即```dp[i][j][k] = min{self, dp[i-1][j][kk]+cost[i][k]}```。如果kk与k不同,那么第i个房子就是第j个block的第一个,即```dp[i][j][k] = min{self, dp[i-1][j-1][kk]+cost[i][k]}```。 -初始状态是```dp[0][0][j] = 0```,其余的状态都是无穷大。 +初始状态较为容易的写法是对第1座房子做单独分析。如果第一座房子已经喷涂,那么`dp[1][1][houses[1]] = 0`,否则`dp[1][1][k] = cost[1][k]`.其余的状态都设为无穷大。DP的转移从i=2开始。 最终的答案是在所有房子喷涂完、构造了target个block、最后一个房子颜色任意的前提下,取最小值。即```min{dp[m][target][k],for k=1,2,..,n``` diff --git a/Dynamic_Programming/152.Maximum-Product-Subarray/152.Maximum-Product-Subarray.cpp b/Dynamic_Programming/152.Maximum-Product-Subarray/152.Maximum-Product-Subarray.cpp deleted file mode 100644 index 7364334fd..000000000 --- a/Dynamic_Programming/152.Maximum-Product-Subarray/152.Maximum-Product-Subarray.cpp +++ /dev/null @@ -1,18 +0,0 @@ -class Solution { -public: - int maxProduct(vector& nums) - { - long MAX = 1; - long MIN = 1; - long ret = INT_MIN; - - for (int i=0; i& nums) - { - int n = nums.size(); - vectordp1(n); - vectordp2(n); + { + int n = nums.size(); + vectordp1(n); // the max prod subarray ending at i + vectordp2(n); // the min prod subarray ending at i dp1[0] = nums[0]; dp2[0] = nums[0]; - long ret = nums[0]; + LL ret = nums[0]; for (int i=1; ij)?0:dp[i+2][j]), dp[i+1][j]); } - return max(nums[0]+((2>n-2)?0:dp[2][n-2]), dp[1][n-1]); + return std::max(dp[0][n-2], dp[1][n-1]); } }; diff --git a/Dynamic_Programming/2172.Maximum-AND-Sum-of-Array/Readme.md b/Dynamic_Programming/2172.Maximum-AND-Sum-of-Array/Readme.md index 6688609f3..0af68beb7 100644 --- a/Dynamic_Programming/2172.Maximum-AND-Sum-of-Array/Readme.md +++ b/Dynamic_Programming/2172.Maximum-AND-Sum-of-Array/Readme.md @@ -1,6 +1,6 @@ ### 2172.Maximum-AND-Sum-of-Array -本题看上像二分图匹配问题。左边是一堆数字,右边是一对slots,要求匹配的边权之和最大。但是标准的二分图匹配要求每条边不能有公共边,本题则是允许最多两条边共享一个slot节点。 +本题看上像二分图匹配问题。左边是一堆数字,右边是一堆slots,要求匹配的边权之和最大。但是标准的二分图匹配要求每条边不能有公共边,本题则是允许最多两条边共享一个slot节点。 同以往一样,我们不用KM算法来解决带权最大二分图匹配,我们也不考虑最小费用最大流的做法,这里依然用状态压缩DP。 diff --git a/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards/2189.Number-of-Ways-to-Build-House-of-Cards.cpp b/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards/2189.Number-of-Ways-to-Build-House-of-Cards.cpp new file mode 100644 index 000000000..e7e795735 --- /dev/null +++ b/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards/2189.Number-of-Ways-to-Build-House-of-Cards.cpp @@ -0,0 +1,15 @@ +class Solution { +public: + int houseOfCards(int n) + { + vector>dp(n+1, vector(n+1,0)); + dp[0][0] = 1; + for (int i=1; i<=n/2; i++) + for (int j=0; j<=n; j++) + { + dp[i][j] = dp[i-1][j] + (j>=(3*i-1)?dp[i-1][j-(3*i-1)]:0); + } + + return dp[n/2][n]; + } +}; diff --git a/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards/Readme.md b/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards/Readme.md new file mode 100644 index 000000000..878f04738 --- /dev/null +++ b/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards/Readme.md @@ -0,0 +1,11 @@ +### 2189.Number-of-Ways-to-Build-House-of-Cards + +本题的直观解读是要将总数n分成若干行之和,每行的牌数是递增的,并且每行的牌数必须是```2*k+(k-1)```的形式,其中k可以理解为三角形的个数。 + +我们换个角度来想,假设将行数编号1,2,3,4...就对应三角形的个数,那么每行对应的牌数是```2*k+(k-1)```. 我们的任务其实是将在这些行里面挑选出若干,使得其总数为n。我们可以预料,可供选择的行号不会很多,撑死最多也就是n行(事实上单独的第n行就需要共3n-1张的牌数了)。 + +我们令dp[i][j]表示前i行里面(挑选若干行)、并且所用牌的总数为j可以得到的方案数。对于第i行而言只有两种选择:1. 我们不选第i行,即不搭建3i-1这种模式,那么```dp[i][j] = dp[i-1][j]```. 2.我们选择第i行,那么第i行本身占用了3i-1张牌,那么意味着我们关注的就是前i-1行里我们选用j-(3i-1)张牌能搭建多少种合法的方案,所以```dp[i][j] = dp[i-1][j-(3i-1)]```. 
+ +于是本题的转移方程就是 ```dp[i][j] = dp[i-1][j] + dp[i-1][j-(3i-1)]```。 + +初始条件是```dp[0][0] = 1```,这是所有状态的“种子”。 diff --git a/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets/2209.Minimum-White-Tiles-After-Covering-With-Carpets.cpp b/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets/2209.Minimum-White-Tiles-After-Covering-With-Carpets.cpp new file mode 100644 index 000000000..c95b76519 --- /dev/null +++ b/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets/2209.Minimum-White-Tiles-After-Covering-With-Carpets.cpp @@ -0,0 +1,22 @@ +class Solution { + int dp[1001][1001]; +public: + int minimumWhiteTiles(string floor, int numCarpets, int carpetLen) + { + int n = floor.size(); + floor = "#"+floor; + + dp[0][0] = 0; + for (int i=1; i<=n; i++) + for (int j=0; j<=numCarpets; j++) + { + dp[i][j] = INT_MAX/2; + dp[i][j] = min(dp[i][j], dp[i-1][j] + (floor[i]=='1')); + if (j>=1) + dp[i][j] = min(dp[i][j], i>=carpetLen ? dp[i-carpetLen][j-1]:0); + } + + return dp[n][numCarpets]; + + } +}; diff --git a/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets/Readme.md b/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets/Readme.md new file mode 100644 index 000000000..347b4064e --- /dev/null +++ b/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets/Readme.md @@ -0,0 +1,10 @@ +### 2209.Minimum-White-Tiles-After-Covering-With-Carpets + +令dp[i][j]表示前i个格子用j块地毯覆盖,留有的最小白色区域。显然我们分两种情况讨论。 + +1. 如果第i个格子我们不用第j块地毯,那么我们会关注前i-1个格子用j块地毯的覆盖情况,再加上第i个格子本身是否是白色。即```dp[i][j] = dp[i-1][j] + (s[i]=='1')``` +2. 如果第i个格子我们用了第j块地毯,那么这块地毯覆盖了carpetLen的区域。为了尽量节约使用地毯,我们必然希望这块地毯的效用最大化,也就是说,我们必然会把前j-1块地毯用来覆盖前i-carpetLen个格子。故```dp[i][j] = dp[i-carpetLen][j-1]```. + +我们在以上两种方案中取最大值。 + +需要注意的是在第二种情况里,如果```i-carpetLen<=0```怎么办?容易判断,在没有任何格子的情况下,所谓“留有的白色区域”自然也是0。 diff --git a/Dynamic_Programming/2214.Minimum-Health-to-Beat-Game/2214.Minimum-Health-to-Beat-Game.cpp b/Dynamic_Programming/2214.Minimum-Health-to-Beat-Game/2214.Minimum-Health-to-Beat-Game.cpp new file mode 100644 index 000000000..e611d188a --- /dev/null +++ b/Dynamic_Programming/2214.Minimum-Health-to-Beat-Game/2214.Minimum-Health-to-Beat-Game.cpp @@ -0,0 +1,20 @@ +using LL = long long; +class Solution { +public: + long long minimumHealth(vector& damage, int armor) + { + int n = damage.size(); + LL dp0 = 0; + LL dp1 = 0; + LL ret = LLONG_MAX; + for (int i=0; i 0? 0 : (-ret+1); + } +}; diff --git a/Dynamic_Programming/2214.Minimum-Health-to-Beat-Game/Readme.md b/Dynamic_Programming/2214.Minimum-Health-to-Beat-Game/Readme.md new file mode 100644 index 000000000..47f9bd814 --- /dev/null +++ b/Dynamic_Programming/2214.Minimum-Health-to-Beat-Game/Readme.md @@ -0,0 +1,10 @@ +### 2214.Minimum-Health-to-Beat-Game + +我们用dp0[i]表示通过第i关时仍未使用盔甲能够存留的最大血量,dp1[i]表示通过第i关时已经使用盔甲能够存留的最大血量,于是转移方程就是 +``` +dp0[i] = dp0[i] - damage[i]; +dp1[i] = max(dp1[i] - damage[i], dp0[i] - max(0, damage[i]-armor)); +``` +我们假设初始血量是0,模拟走一遍上面的流程。由此,```max(dp0[i], dp1[i])```表示的就是通过第i关所能存留的最大血量。 + +要保证在通过的过程中的最大血量始终都大于0,那么就观察所有```max(dp0[i], dp1[i])```里的最低点,通过加上这个offset来保证全程的血量都不低于1. 
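举一组简单数据验证一下上述流程(数值为手动推演,仅作示意):假设 damage = [2,7,4,3],armor = 4。
```
dp0: -2, -9, -13, -16    (全程未用盔甲时的血量净变化)
dp1:  0, -5,  -9, -12    (已用过盔甲时的最优血量净变化)
```
max(dp0, dp1) 沿途的最低点是 -12,故最少初始血量为 12+1 = 13,相当于把盔甲用在 damage=7 的那一关,抵消了 4 点伤害。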
diff --git a/Dynamic_Programming/2218.Maximum-Value-of-K-Coins-From-Piles/2218.Maximum-Value-of-K-Coins-From-Piles.cpp b/Dynamic_Programming/2218.Maximum-Value-of-K-Coins-From-Piles/2218.Maximum-Value-of-K-Coins-From-Piles.cpp new file mode 100644 index 000000000..95dc2dbf0 --- /dev/null +++ b/Dynamic_Programming/2218.Maximum-Value-of-K-Coins-From-Piles/2218.Maximum-Value-of-K-Coins-From-Piles.cpp @@ -0,0 +1,32 @@ +class Solution { + int dp[1002][2002]; +public: + int maxValueOfCoins(vector>& piles, int k) + { + int n = piles.size(); + vector>presum(n); + for (int i=0; i=1 && k==(s[i]-'0')) + dp[i][j][k] += dp[i-1][j-1][1-k]; + } + + return dp[n][3][0] + dp[n][3][1]; + } +}; diff --git a/Dynamic_Programming/2222.Number-of-Ways-to-Select-Buildings/Readme.md b/Dynamic_Programming/2222.Number-of-Ways-to-Select-Buildings/Readme.md new file mode 100644 index 000000000..235aa6dec --- /dev/null +++ b/Dynamic_Programming/2222.Number-of-Ways-to-Select-Buildings/Readme.md @@ -0,0 +1,9 @@ +### 2222.Number-of-Ways-to-Select-Buildings + +我们在考虑第i个建筑是否被选中时,需要考虑的因素有:我们已经选中了多少?上一个选中的和当前这个是否是同一个类型? + +想清楚这些,我们就可以设计状态:dp[i][j][k]表示考虑完第i幢建筑时,如果已经选中了j个,并且最近一个被选中的建筑类别是k时,总共有多少种方案。 + +状态转移时需要分两种情况:如果我们不选第i个建筑,那么自然dp[i][j][k] = dp[i-1][j][k]。如果我们选中第i个建筑,那么需要保证第i个建筑与类别k是匹配的,于是这就取决于之前解决过的一个问题:在前i-1个建筑里,选择j-1个,并且最近一个选中的建筑类别是1-k,这样的方案有多少。依据dp[i-1][j][1-k],再选中第i个建筑,就是dp[i][j][k]。 + +最终答案是dp[n][3][0]+dp[n][3][1]. diff --git a/Dynamic_Programming/2247.Maximum-Cost-of-Trip-With-K-Highways/2247.Maximum-Cost-of-Trip-With-K-Highways.cpp b/Dynamic_Programming/2247.Maximum-Cost-of-Trip-With-K-Highways/2247.Maximum-Cost-of-Trip-With-K-Highways.cpp new file mode 100644 index 000000000..64a81808c --- /dev/null +++ b/Dynamic_Programming/2247.Maximum-Cost-of-Trip-With-K-Highways/2247.Maximum-Cost-of-Trip-With-K-Highways.cpp @@ -0,0 +1,38 @@ +class Solution { +public: + int maximumCost(int n, vector>& highways, int k) + { + vector>>next(n); + for (auto highway: highways) + { + int a = highway[0], b = highway[1], t = highway[2]; + next[a].push_back({b,t}); + next[b].push_back({a,t}); + } + + int ret = -1; + vector>dp(1<(n, INT_MIN)); + for (int i=0; i>last)&1)==0) continue; + for (auto nxt: next[last]) + { + auto [j, t] = nxt; + if ((state>>j)&1) continue; + dp[state+(1<>& grid) + { + int m = grid.size(), n = grid[0].size(); + set dp[101][101]; + + if (grid[0][0]=='(') + dp[0][0] = {1}; + else + dp[0][0] = {}; + + for (int i=0; i=1) + { + for (int x: dp[i-1][j]) + if (x+k>=0 && (m+n-1)-(i+j+1)>=(x+k)) + dp[i][j].insert(x+k); + } + if (j>=1) + for (int x: dp[i][j-1]) + { + if (x+k>=0 && (m+n-1)-(i+j+1)>=(x+k)) + dp[i][j].insert(x+k); + } + } + + return dp[m-1][n-1].find(0)!=dp[m-1][n-1].end(); + } +}; diff --git a/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path_v2.cpp b/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path_v2.cpp new file mode 100644 index 000000000..70d684190 --- /dev/null +++ b/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path_v2.cpp @@ -0,0 +1,26 @@ +class Solution { + bool dp[101][101][103]; +public: + bool hasValidPath(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + + if (grid[0][0]=='(') + dp[0][0][1] = true; + + for (int i=0; i0 && grid[i][j]=='(') + dp[i][j][k] = (i>=1 && dp[i-1][j][k-1]) || (j>=1 && dp[i][j-1][k-1]); + else if (grid[i][j]==')') + dp[i][j][k] 
= (i>=1 && dp[i-1][j][k+1]) || (j>=1 && dp[i][j-1][k+1]); + } + + return dp[m-1][n-1][0]; + + } +}; diff --git a/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path/Readme.md b/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path/Readme.md new file mode 100644 index 000000000..23e2d1e94 --- /dev/null +++ b/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path/Readme.md @@ -0,0 +1,13 @@ +### 2267.Check-if-There-Is-a-Valid-Parentheses-String-Path + +我们做过很多关于valid parentheses的题目。对于一个合法的括号字符串,必须满足两个条件:1. 任意的前缀字符串必须满足左括号的数目不能少于右括号的数目。2. 整个字符串结尾处,左右括号的数目必须相等。更具体的做法,就是用一个计数器count,来记录当前未被匹配的左括号的数量,一定在从左往右遍历的过程中发现count小于0,那么即可判定该字符串不可能是valid parentheses. + +本题形式上非常类似常规的“走迷宫”型DP。事实上,也确实是同样的套路。我们想用dp[i][j]表示某个字符串路径到达(i,j)时是否依然合法,必然需要记录的就是未被匹配的左括号数量。但是由于之前路径的不同,到达(i,j)时的未匹配左括号的数量也必然可能有多个,所以本题里dp[i][j]其实是一个集合。 + +dp[i][j]的前驱状态有两个,dp[i-1][j]和dp[i][j-1]. 假设前驱状态的两个集合总共包含了{0,2,3},并且(i,j)是一个左括号,那么dp[i][j]就可以是{1,3,4},也就是将前者集合的元素都增1. 相反地,如果(i,j)是一个右括号,那么需要和之前的左括号对消,故dp[i][j]就是{1,2}. 特别注意,d[i][j]里面不能加入-1,因为不会有valid parentheses里面(哪怕是暂时地)出现未匹配的左括号是负数。 + +特别地,如果dp[i][j]的前驱状态的集合是空集,那么意味着无论如何,从起点到(i,j)都不会有合法的路径,故dp[i][j]也必须赋值为空集。 + +另外,本题有一个剪枝的技巧。因为路径的步长是固定,当我们走完一定步数之后,发现剩下未走的步数就算都看做是右括号,也无法抵消当前剩余的左括号的话,那么注定这条路径是不能成功的,就可以提前终止。 + +最终的答案就是查看dp[m-1][n-1]这个集合是否包含零元素。 diff --git a/Dynamic_Programming/2272.Substring-With-Largest-Variance/2272.Substring-With-Largest-Variance_v1.cpp b/Dynamic_Programming/2272.Substring-With-Largest-Variance/2272.Substring-With-Largest-Variance_v1.cpp new file mode 100644 index 000000000..a6128c3ff --- /dev/null +++ b/Dynamic_Programming/2272.Substring-With-Largest-Variance/2272.Substring-With-Largest-Variance_v1.cpp @@ -0,0 +1,35 @@ +class Solution { +public: + int largestVariance(string s) + { + int n = s.size(); + unordered_setSet(s.begin(), s.end()); + + int ret = 0; + + for (auto a: Set) + for (auto b: Set) + { + if (a==b) continue; + int curSum0 = 0, curSum1 = INT_MIN/2; + + for (int i=0; i>Map; + for (int i=0; i& present, vector& future, int budget) + { + int n = present.size(); + + vectordp(1001); + for (int i=0; i=present[i]; j--) + { + dp[j] = max(dp[j], dp[j-present[i]]+future[i]-present[i]); + } + + return dp[budget]; + } +}; diff --git a/Dynamic_Programming/2291.Maximum-Profit-From-Trading-Stocks/Readme.md b/Dynamic_Programming/2291.Maximum-Profit-From-Trading-Stocks/Readme.md new file mode 100644 index 000000000..e5a744797 --- /dev/null +++ b/Dynamic_Programming/2291.Maximum-Profit-From-Trading-Stocks/Readme.md @@ -0,0 +1,3 @@ +### 2291.Maximum-Profit-From-Trading-Stocks + +非常直观的01背包问题。挨个遍历物品。考察对于给定某budget情况下,加入这个物品是否能带来更大的收益,即```dp[budget] = max(dp[budget], dp[budget-cost[i]] + profit[i])``` diff --git a/Dynamic_Programming/2312.Selling-Pieces-of-Wood/2312.Selling-Pieces-of-Wood.cpp b/Dynamic_Programming/2312.Selling-Pieces-of-Wood/2312.Selling-Pieces-of-Wood.cpp new file mode 100644 index 000000000..b7630b5e0 --- /dev/null +++ b/Dynamic_Programming/2312.Selling-Pieces-of-Wood/2312.Selling-Pieces-of-Wood.cpp @@ -0,0 +1,22 @@ +using LL = long long; +class Solution { + LL dp[201][201]; +public: + long long sellingWood(int m, int n, vector>& prices) + { + for (auto x: prices) + dp[x[0]][x[1]] = x[2]; + + for (int i=1; i<=m; i++) + for (int j=1; j<=n; j++) + { + for (int k=1; k1) continue; + + for (int x=1; x<=6; x++) + { + if (x!=b) + { + dp[i][a][b] += dp[i-1][x][a]; + dp[i][a][b] %= M; + } + } + + if (i==n) + ret = (ret + dp[i][a][b]) %M; + } + + return ret; + } +}; diff --git 
a/Dynamic_Programming/2318.Number-of-Distinct-Roll-Sequences/Readme.md b/Dynamic_Programming/2318.Number-of-Distinct-Roll-Sequences/Readme.md new file mode 100644 index 000000000..507701172 --- /dev/null +++ b/Dynamic_Programming/2318.Number-of-Distinct-Roll-Sequences/Readme.md @@ -0,0 +1,13 @@ +### 2318.Number-of-Distinct-Roll-Sequences + +我们在基于前i-1位的方案之上,考虑序列的第i位的填充时,思考能否填写某个数字d,需要的约束有:和前一位不能相等,和前一位必须互质,和前两位不能相等。可见,dp[i]关系到了前两位的具体方案。所以我们设计状态dp[i][a][b],表示前i位里最后两位数字分别是a和b的情况下,所有的合法方案数目。 + +接下来我们就很容易看出dp[i]与dp[i-1]之间的转移关系。因为i的最后两位是a和b,那么i-1的最后两位必然是某个数x和a。所以我们枚举所有合法的x使得dp[i]能将b接在dp[i-1]之后,要使得```xab```合法需要的条件是: +1. a和x不能相等 +2. b和x不能相等 +3. x和a必须互质 +满足这些条件的话,就意味着```dp[i][a][b] += dp[i-1][x][a]```,即将所有合法的dp[i-1][x][a]方案后面直接加上一个b。 + +有人会说,这里dp[i][a][b]没有考虑检查a是否和x之前的那个数字相同呀。事实上,dp[i]的合法性是建立在dp[i-1]的基础上的。如果dp[i-1][x][a]代表了合法的方案数目,那么自然就不会存在a与x之前的数字相同的问题。 + +在实际计算中,我们在更新所有dp[i][a][b]的过程中,可以通过计算ab本身的合法性,来提前跳过一些不合法的dp[i][a][b] diff --git a/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v1.cpp b/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v1.cpp new file mode 100644 index 000000000..34580f542 --- /dev/null +++ b/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v1.cpp @@ -0,0 +1,23 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + LL dp[10001][2]; + // dp[i][0]: the # of plans so that there is no building at the i-th plot + // dp[i][1]: the # of plans so that there is a building at the i-th plot +public: + int countHousePlacements(int n) + { + dp[0][0] = 1; + dp[0][1] = 0; + + for (int i=1; i<=n; i++) + { + dp[i][0] = (dp[i-1][0] + dp[i-1][1])%M; + dp[i][1] = dp[i-1][0]; + } + + LL ret = (dp[n][0]+dp[n][1]) % M; + + return ret * ret % M; + } +}; diff --git a/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v2.cpp b/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v2.cpp new file mode 100644 index 000000000..ff32bab04 --- /dev/null +++ b/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v2.cpp @@ -0,0 +1,22 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + LL dp[10001]; // dp[i]: the # of plans so that there is a building at the i-th plot +public: + int countHousePlacements(int n) + { + dp[0] = 0; + dp[1] = 1; + + for (int i=2; i<=n; i++) + { + dp[i] = (dp[i-1] + dp[i-2])%M; + } + + LL ret = 1; + for (int i=1; i<=n; i++) + ret = (ret+dp[i]) % M; + + return ret * ret % M; + } +}; diff --git a/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v3.cpp b/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v3.cpp new file mode 100644 index 000000000..9250a2a75 --- /dev/null +++ b/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses/2320.Count-Number-of-Ways-to-Place-Houses_v3.cpp @@ -0,0 +1,27 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + unordered_mapmemo; +public: + int countHousePlacements(int n) + { + LL ret = 0; + for (int r=0; 2*r-1<=n; r++) + ret = (ret + C(n-r+1, r))%M; + return ret * ret % M; + } + + LL C(int x, int y) + { + if (x& nums1, vector& nums2) + { + return max(solve(nums1,nums2), solve(nums2,nums1)); + } + int solve(vector& nums1, vector& nums2) + { + int n = nums1.size(); + vectornums(n); + for (int i=0; iprimes 
= Eratosthenes(maxValue); + + LL ret = 1; + for (int t=2; t<=maxValue; t++) + { + int x = t; + LL ans = 1; + for (auto p: primes) + { + int count = 0; + while (x>1 && (x%p==0)) + { + x/=p; + count++; + } + ans = ans * dp[n][count] % M; + } + ret = (ret + ans) % M; + } + + return ret; + } + + vectorEratosthenes(int n) + { + vectorq(n+1,0); + vectorprimes; + for (int i=2; i<=sqrt(n); i++) + { + if (q[i]==1) continue; + int j=i*2; + while (j<=n) + { + q[j]=1; + j+=i; + } + } + for (int i=2; i<=n; i++) + { + if (q[i]==0) + primes.push_back(i); + } + return primes; + } +}; diff --git a/Dynamic_Programming/2338.Count-the-Number-of-Ideal-Arrays/Readme.md b/Dynamic_Programming/2338.Count-the-Number-of-Ideal-Arrays/Readme.md new file mode 100644 index 000000000..52964c695 --- /dev/null +++ b/Dynamic_Programming/2338.Count-the-Number-of-Ideal-Arrays/Readme.md @@ -0,0 +1,21 @@ +### 2338.Count-the-Number-of-Ideal-Arrays + +我们令序列的最后一个元素是x,那么这个长度为n的序列的本质就是从1开始,每次乘以1或者一个x的非1的因数,直至最后一个元素变成x。显然,这些非1的因数的集合必须是x的一个分解,比如说,当n=4, x=30的时候,可以有```30 = 1*2*3*5```,对应的序列就是{1,2,6,30};或者```30 = 5*1*3*2```,对应的序列就是{5,5,15,30},以及其他。 + +考虑到x的普通因数分解其实太多了,但是质因数分解是唯一的,比如记做```x = a*b*c...```。所以我们很容易发现,我们本质只需要将每个质因数任意地分配这n个位置上,最后都能对应一个符合条件的序列。特别注意,每个位置可以允许放置多个质因数。比如上面的例子,将30的质因数分解```2*3*5```任意丢进4个位置上:我们可以```(),(2),(5),(3)```,那么本质上就对应了序列{1,2,10,30};我们也可以```(),(2,3),(5),()```,那么本质上就对应了序列{1,6,30,30}。 + +既然每个质因数都可以独立放置,那么假设x总共有k个质因数,那么以x为结尾的序列个数是不是就是```n^k```呢?我们这里发现了一个问题,那就是没法保证这些序列是distinct的,问题出在相同的质因数上。比如说n=2,x=4的例子,x有两个相同的质因数(记做2a和2b),如果各自独立地扔进两个位置,那么我们会有四种分配方式 ```{a1, a2}, {a2, a1}, {_ , a1*a2}, {a1*a2, _}```. 但是前两者序列都对应了{2,4}这一样的序列。 + +至此,我们调整目标,当前需要解决的问题是:对于k个相同的质因数,我们想将它们分配在n个位置上(允许一个位置有多个),那么有多少种看上去“不同”的分配方式。这应该就是一个典型的DP题。我们类似地定义dp[i][j],那么转移方程的关键就是看第i个位置上我们放置了多少个质因数,假设如果有t个,那么问题就转移到了dp[i-1][j-t]。所以大致的dp方程就是 +```cp +for (int i=1; i<=n; i++) + for (int j=0; j<=k; j++) + { + for (int t=0; t<=j; t++) + dp[i][j] += dp[i-1][j-t]; + } +``` +预处理全部dp值的时间复杂度是o(NKK),其中K是对于x的某个质因数的个数。考虑到x的上限是10000,就算这个质因数是最小的2,那么重复出现的次数也不会超过14,否则2^14就超过了上限。所以dp的时间复杂度就是o(196N),考虑到N是1e4,那么恰好完美符合题目的预期。 + +综上,本题的解法是:从1到MaxVal遍历x作为序列的最后一个元素:对x做质因数分解,对于每种质因数,如果个数是k,我们就有dp[n][k]种分配方法,然后将所有不同质因数的分配方法相乘。最后把不同的x的结果再相加。 + diff --git a/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line/2361.Minimum-Costs-Using-the-Train-Line.cpp b/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line/2361.Minimum-Costs-Using-the-Train-Line.cpp new file mode 100644 index 000000000..e29f45d2d --- /dev/null +++ b/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line/2361.Minimum-Costs-Using-the-Train-Line.cpp @@ -0,0 +1,26 @@ +using LL = long long; +class Solution { + LL dp[100005][2]; +public: + vector minimumCosts(vector& regular, vector& express, int expressCost) + { + int n = regular.size(); + regular.insert(regular.begin(), 0); + express.insert(express.begin(), 0); + + dp[0][0] = 0; + dp[0][1] = expressCost; + + vectorrets; + + for (int i=1; i<=n; i++) + { + dp[i][0] = min(dp[i-1][0] + regular[i], dp[i-1][1] + regular[i]); + dp[i][1] = min(dp[i-1][1] + express[i], dp[i-1][0] + expressCost + express[i]); + + rets.push_back(min(dp[i][0], dp[i][1])); + } + + return rets; + } +}; diff --git a/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line/Readme.md b/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line/Readme.md new file mode 100644 index 000000000..de0a1a977 --- /dev/null +++ b/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line/Readme.md @@ -0,0 +1,8 @@ +### 2361.Minimum-Costs-Using-the-Train-Line + 
+很明显,状态变量dp[i][0]表示到达第i个车站的regular所需要的最小代价,dp[i][1]表示到达第i个车站的express所需要的最小代价。于是有转移方程: +```cpp +dp[i][0] = min(dp[i-1][0] + regular[i], dp[i-1][1] + regular[i]); +dp[i][1] = min(dp[i-1][1] + express[i], dp[i-1][0] + expressCost + express[i]); +``` +注意我们不需要考虑dp[i][0]与dp[i][1]之间的转移。这是因为,我们如果想要从dp[i][0]转移到dp[i][1],其目的一定只是为了后续得到dp[i+1][1]。单独从第i站的角度来看,只要到了regular或express都算达成了任务,两者间的跳转对于第i站而言没有意义。 diff --git a/Dynamic_Programming/2403.Minimum-Time-to-Kill-All-Monsters/2403.Minimum-Time-to-Kill-All-Monsters.cpp b/Dynamic_Programming/2403.Minimum-Time-to-Kill-All-Monsters/2403.Minimum-Time-to-Kill-All-Monsters.cpp new file mode 100644 index 000000000..eb6a77b17 --- /dev/null +++ b/Dynamic_Programming/2403.Minimum-Time-to-Kill-All-Monsters/2403.Minimum-Time-to-Kill-All-Monsters.cpp @@ -0,0 +1,20 @@ +using LL = long long; +class Solution { +public: + long long minimumTime(vector& power) + { + int n = power.size(); + vectordp(1<>i)&1) + dp[state] = min(dp[state], dp[state- (1<=0; i--) + for (int j=n-1; j>=i+1; j--) + { + if (s[i]==s[j]) + lcs[i][j] = lcs[i+1][j+1]+1; + } + + + for (int i=n-1; i>=0; i--) + { + dp[i] = 1; + for (int j=i+1; j=j-i) + { + dp[i] = max(dp[i], dp[j]+1); + } + } + } + return dp[0]; + } +}; diff --git a/Dynamic_Programming/2430.Maximum-Deletions-on-a-String/Readme.md b/Dynamic_Programming/2430.Maximum-Deletions-on-a-String/Readme.md new file mode 100644 index 000000000..e24bfcf9a --- /dev/null +++ b/Dynamic_Programming/2430.Maximum-Deletions-on-a-String/Readme.md @@ -0,0 +1,9 @@ +### 2430.Maximum-Deletions-on-a-String + +因为是一刀一刀地从头开始砍,显然我们会令dp[i]表示以i开头的字符串的maximum deletion。 + +对于状态的转移,我们不难想到尝试它的第一个刀的位置。假设我们想砍在位置j之前,那么就需要查看是否满足[i:j-1]和[j:j+j-i]这两段区间是否相等。如果是的话,就有`dp[i] = d[j]+1`. + +那么如何高效判断这两个分别以i和j开头的区间是否相等呢?我们可以用N^2的时间预处理,先得到任意两个位置i和j的最大公共前缀长度lcs。如果`lcs[i][j] >= j-i`,那么就意味着[i:j-1]和[j:j+j-i]这两段区间必然相等。事实上,对于i而言可能会有多个合适的j,所以`dp[i] = max{d[j]+1}` + +最终返回dp[0]. diff --git a/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits/2431.Maximize-Total-Tastiness-of-Purchased-Fruits.cpp b/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits/2431.Maximize-Total-Tastiness-of-Purchased-Fruits.cpp new file mode 100644 index 000000000..89ea96c2f --- /dev/null +++ b/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits/2431.Maximize-Total-Tastiness-of-Purchased-Fruits.cpp @@ -0,0 +1,36 @@ +class Solution { + int dp[1005][1005][6]; +public: + int maxTastiness(vector& price, vector& tastiness, int maxAmount, int maxCoupons) + { + int ret = 0; + dp[0][0][0] = 0; + if (price[0]<=maxAmount) + { + dp[0][price[0]][0] = tastiness[0]; + ret = tastiness[0]; + } + if (price[0]/2<=maxAmount && 1<=maxCoupons) + { + dp[0][price[0]/2][1] = tastiness[0]; + ret = tastiness[0]; + } + + for (int i=1; i=price[i]) + dp[i][j][k] = max(dp[i][j][k], dp[i-1][j-price[i]][k] + tastiness[i]); + if (j>=price[i]/2 && k>=1) + dp[i][j][k] = max(dp[i][j][k], dp[i-1][j-price[i]/2][k-1] + tastiness[i]); + + ret = max(ret, dp[i][j][k]); + } + + return ret; + + + } +}; diff --git a/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits/Readme.md b/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits/Readme.md new file mode 100644 index 000000000..a84b44348 --- /dev/null +++ b/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits/Readme.md @@ -0,0 +1,10 @@ +### 2431.Maximize-Total-Tastiness-of-Purchased-Fruits + +很常规的DP模式。令dp[i][j][k]表示前i个水果、花费j的钱、使用k张半价券,所能得到的最大tastiness。 + +显然,我们会考虑对第i个水果的决策: +1. 
我们不买第i个水果,`dp[i][j][k] = dp[i-1][j][k]`; +2. 我们原价买第i个水果,`dp[i][j][k] = dp[i-1][j-price[i]][k] + tastiness[i]`; +3. 我们半价买第i个水果,`dp[i][j][k] = dp[i-1][j-price[i]/2][k-1] + tastiness[i]`; + +注意为了不出现越界,我们使用上述的转移方程时,需要对j和k加上约束。此外`i=0`时单独处理dp最为方便。 diff --git a/Dynamic_Programming/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K.cpp b/Dynamic_Programming/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K.cpp new file mode 100644 index 000000000..b9fd0a9eb --- /dev/null +++ b/Dynamic_Programming/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K.cpp @@ -0,0 +1,37 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int numberOfPaths(vector>& grid, int k) + { + int m = grid.size(), n = grid[0].size(); + vector>>dp(m, vector>(n, vector(k))); + + LL sum = 0; + for (int i=0; i& robot, vector>& factory) + { + int m = robot.size(); + int n = factory.size(); + + sort(robot.begin(), robot.end()); + sort(factory.begin(), factory.end()); + + for (int i=0; i& nums) + { + memset(dp, 0x3f, sizeof(dp)); + int n = nums.size(); + for (int i=0; i1) + dp[i] = min(dp[i], j==0?1:(dp[j-1]+1)); + } + + if (dp[n-1]==0x3f3f3f3f) + return -1; + return dp[n-1]; + } +}; diff --git a/Dynamic_Programming/2464.Minimum-Subarrays-in-a-Valid-Split/Readme.md b/Dynamic_Programming/2464.Minimum-Subarrays-in-a-Valid-Split/Readme.md new file mode 100644 index 000000000..c3de6598f --- /dev/null +++ b/Dynamic_Programming/2464.Minimum-Subarrays-in-a-Valid-Split/Readme.md @@ -0,0 +1,3 @@ +### 2464.Minimum-Subarrays-in-a-Valid-Split + +本题包装着数论问题,但本质其实就是一个基础型的dp。根据数据范围,o(N^2)的复杂度可解,因此遍历最后一段subarray的范围即可。 diff --git a/Dynamic_Programming/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings.cpp b/Dynamic_Programming/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings.cpp new file mode 100644 index 000000000..e35670f24 --- /dev/null +++ b/Dynamic_Programming/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings.cpp @@ -0,0 +1,32 @@ +class Solution { + int isPalin[2001][2001]; +public: + int maxPalindromes(string s, int k) + { + int n = s.size(); + for (int i=0; idp(n); + for (int i=k-1; i=0 && !isprime(s[i-minLength]) && isprime(s[i-minLength+1])) + { + sum += dp[i-minLength][j-1]; + sum %= M; + } + if (!isprime(s[i])) { + dp[i][j] = sum; + } + } + } + return dp[n][K]; + } + + bool isprime(char ch) + { + return ch == '2' || ch == '3' || ch == '5' || ch == '7'; + } +}; diff --git a/Dynamic_Programming/2478.Number-of-Beautiful-Partitions/Readme.md b/Dynamic_Programming/2478.Number-of-Beautiful-Partitions/Readme.md new file mode 100644 index 000000000..8aff030ae --- /dev/null +++ b/Dynamic_Programming/2478.Number-of-Beautiful-Partitions/Readme.md @@ -0,0 +1,41 @@ +### 2478.Number-of-Beautiful-Partitions + +本题很容易想到常规的N^3的动态规划。令dp[i][j]表示前i个元素(1-index)分成j份的最优方案(即最多的切分数)。显然,我们关注的就是最后一个subarray的位置。如果i是合数,那么我们可以遍历所有i之前的位置k,如果满足`isprime(k)`, `!isprime(k+1)`,`i-k>=minLength`,那么就意味着我们可以在k后面切一刀,[k+1,i]作为最后一段。于是就有`dp[i][j] += dp[k][j-1]`。 + +代码如下: +```cpp +for (int i=1; i<=n; i++) + for (int j=1; j<=K; j++) + { + if (isprime(s[i])) { + continue; + } + for (int k=j; (k+minLength-1)<=i; k++) + { + if (isprime(s[k])) + dp[i][j] += dp[k-1][j-1]; + } + } +``` + 
+那么如何改进时间复杂度呢?我们观察这个状态转移方程,发现无论i是多少,dp[i][j]只与`sum{dp[k][j-1]}`有关,其中k是比i小的数。所以我们可以把j放在第一个循环(前两个循环互换没有任何影响),然后随着i的遍历,我们可同时累加与更新`sum{dp[k][j-1]}`得到一段适当的前缀和,这样直接就有`dp[i][j] = presum`即可。 + +于是改动后的代码 +```cpp +for (int j=1; j<=K; j++) +{ + LL sum = 0; + for (int i=1; i<=n; i++) + { + if (i-minLength>=0 && !isprime(s[i-minLength]) && isprime(s[i-minLength+1])) + { + sum += dp[i-minLength][j-1]; + sum %= M; + } + if (!isprime(s[i])) { + dp[i][j] = sum; + } + } +} +``` +最后答案是dp[n][K]。 diff --git a/Dynamic_Programming/2484.Count-Palindromic-Subsequences/2484.Count-Palindromic-Subsequences.cpp b/Dynamic_Programming/2484.Count-Palindromic-Subsequences/2484.Count-Palindromic-Subsequences.cpp new file mode 100644 index 000000000..0ed7d6b39 --- /dev/null +++ b/Dynamic_Programming/2484.Count-Palindromic-Subsequences/2484.Count-Palindromic-Subsequences.cpp @@ -0,0 +1,62 @@ +using LL = long long; +class Solution { + LL dp1[10005][10][10]; + LL dp2[10005][10][10]; + LL count1[10005][10]; + LL count2[10005][10]; + LL M = 1e9+7; +public: + int countPalindromes(string s) + { + int n = s.size(); + s = "#"+s; + + for (int j=0; j<=9; j++) + { + int sum = 0; + for (int i=1; i<=n; i++) + { + sum += (s[i]-'0'==j); + count1[i][j] = sum; + } + } + + for (int j=0; j<=9; j++) + { + int sum = 0; + for (int i=n; i>=1; i--) + { + sum += (s[i]-'0'==j); + count2[i][j] = sum; + } + } + + for (int i=2; i<=n; i++) + for (int j=0; j<=9; j++) + for (int k=0; k<=9; k++) + { + dp1[i][j][k] = dp1[i-1][j][k]; + if (s[i]=='0'+k) + dp1[i][j][k] = (dp1[i][j][k] + count1[i-1][j]) % M; + } + + for (int i=n-1; i>=1; i--) + for (int j=0; j<=9; j++) + for (int k=0; k<=9; k++) + { + dp2[i][j][k] = dp2[i+1][j][k]; + if (s[i]=='0'+k) + dp2[i][j][k] = (dp2[i][j][k] + count2[i+1][j]) % M; + } + + LL ret = 0; + for (int i=3; i<=n-2; i++) + for (int j=0; j<=9; j++) + for (int k=0; k<=9; k++) + { + ret += dp1[i-1][j][k] * dp2[i+1][j][k] % M; + ret %= M; + } + return ret; + } +}; diff --git a/Dynamic_Programming/2484.Count-Palindromic-Subsequences/Readme.md b/Dynamic_Programming/2484.Count-Palindromic-Subsequences/Readme.md new file mode 100644 index 000000000..11d483d9b --- /dev/null +++ b/Dynamic_Programming/2484.Count-Palindromic-Subsequences/Readme.md @@ -0,0 +1,7 @@ +### 2484.Count-Palindromic-Subsequences + +长度为5的回文串,意味着我们对中间的字符没有任何要求。剩下的镜像部分,本质只是两个字符的组合。考虑到本题的元素只是数字,只有0-9共10种可能,所以组合的方式只有100种。结合字符串的长度是1e4,基本可以判定时间复杂度就是10^6,状态变量定义为`dp1[i][j][k]`表示前i个元素的子串里,以j和k结尾的subsequence有多少。同理定义为`dp2[i][j][k]`表示后i个元素逆序来看的子串里,以j和k结尾的subsequence有多少。这样我们枚举长度为5的回文串的中间字符位置i,则有`ret+=dp[i-1][j][k]*dp[i+1][j][k]`. + +接下来考虑`dp1[i][j][k]`如何求解。依然从第i个元素下手。如果第i个元素没有贡献任何“以j,k结尾的新子串”,则有`dp1[i][j][k] += dp[i-1][j][k]`。如果第i个元素恰好是k,那么s[i]本身就可能贡献一个“以j,k结尾的新子串”,这个子串的数目取决于i之前出现了多少个j。所以我们还需要预处理得到一个`count1[i-1][j]`表示前i-1个元素里面有多少个j。因此就有`dp1[i][j][k] += count1[i-1][j]`. + +同理我们可以逆序处理得到count2和dp2. 
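举一个小例子帮助理解(可手动枚举验证):当 s = "103301" 时,长度为 5 的回文子序列只能是 "10301";由于中间那个字符可以取原串里两个 '3' 中的任意一个,按下标计数它作为子序列出现了 2 次,故答案为 2。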
diff --git a/Dynamic_Programming/2518.Number-of-Great-Partitions/2518.Number-of-Great-Partitions.cpp b/Dynamic_Programming/2518.Number-of-Great-Partitions/2518.Number-of-Great-Partitions.cpp new file mode 100644 index 000000000..c5765728f --- /dev/null +++ b/Dynamic_Programming/2518.Number-of-Great-Partitions/2518.Number-of-Great-Partitions.cpp @@ -0,0 +1,35 @@ +using LL = long long; +class Solution { + LL dp[1005][1005]; + LL M = 1e9+7; +public: + int countPartitions(vector& nums, int k) + { + if (accumulate(nums.begin(), nums.end(), 0LL) < k*2) + return 0; + + int n = nums.size(); + nums.insert(nums.begin(), 0); + + dp[0][0] = 1; + + for (int i=1; i<=n; i++) + for (int s = 0; s=nums[i]) + dp[i][s] += dp[i-1][s-nums[i]]; + dp[i][s] %= M; + } + + LL total = 1; + for (int i=1; i<=n; i++) + total = total * 2 % M; + + LL invalid = 0; + for (int s=0; s=k`的dp值就是方案总数`2^i`(每个元素随机分入A或者B)所对应的补集。 + +那么如何计算dp[i][s]呢?这就是一个传统的背包问题。dp写法就是考虑是否选取第i个元素,如果选取则有`dp[i][s] = dp[i-1][s-nums[i]]`,如果不选取则有`dp[i][s] = dp[i-1][s]`,我们将这两种情况相加就是构造dp[i][s]的总的方案数。 + +于是,我们得到了`2^n - sum{dp[n][s], s=0,1,2,..k-1}`,表示构造group A元素和大于等于k的方案数。但是如何保证group B的元素和也大于等于k呢?我们就需要从中减去那些“group A元素和大等于k,且group B元素和小于k”的方案数。注意到,如果group A元素和大于等于k,言下之意必然有group B元素和小于k,这是我们在题目开头就保证的。所以“group A元素和大等于k,且group B元素和小于k”的方案数 => “group B元素和小于k”的方案数 => “group A元素和小于k”的方案数 => `sum{dp[n][s], s=0,1,2,..k-1}` + +所以最终答案就是`2^n - 2 * sum{dp[n][s], s=0,1,2,..k-1}` diff --git a/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K/2522.Partition-String-Into-Substrings-With-Values-at-Most-K.cpp b/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K/2522.Partition-String-Into-Substrings-With-Values-at-Most-K.cpp new file mode 100644 index 000000000..8f0ae14a5 --- /dev/null +++ b/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K/2522.Partition-String-Into-Substrings-With-Values-at-Most-K.cpp @@ -0,0 +1,28 @@ +class Solution { + int dp[100005]; +public: + int minimumPartition(string s, int k) + { + int n = s.size(); + int m = to_string(k).size(); + for (int i=0; ik) + return -1; + } + + s = "#"+s; + dp[0] = 0; + + for (int i=1; i<=n; i++) + { + if (i-m+1>=1 && stoi(s.substr(i-m+1, m)) <= k) + dp[i] = dp[i-m] + 1; + else + dp[i] = dp[max(0, i-m+1)] + 1; + } + + return dp[n]; + + } +}; diff --git a/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K/Readme.md b/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K/Readme.md new file mode 100644 index 000000000..f6072cc60 --- /dev/null +++ b/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K/Readme.md @@ -0,0 +1,7 @@ +### 2522.Partition-String-Into-Substrings-With-Values-at-Most-K + +这是一个很常规的dp题。我们令dp[i]表示前i个元素能够分成的最少分组。着眼点就是寻找最后一个区间的范围[j:i]。如果s[j:i]是小于等于k的,那么就有`dp[i] = dp[j-1]+1`。 + +那么我们是否需要遍历j找到最小的dp[j-1]呢?这样就是一个n^2的算法。事实上因为dp[i]必然是单调递增的,所以我们只需要找尽可能小的j,即能找到尽可能小的dp[j-1]。考虑k的长度是m,那么我们只需要考察最后一个区间长度如果是m能否可行。不可行的话,取最后一个区间的长度是m-1即可。 + +注意一下无解的情况。如果m长度是1,且s[i]里有一个字符大于k,那么说明即使区间长度是1也无法满足要求。 diff --git a/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array/2547.Minimum-Cost-to-Split-an-Array.cpp b/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array/2547.Minimum-Cost-to-Split-an-Array.cpp new file mode 100644 index 000000000..6a2189e12 --- /dev/null +++ b/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array/2547.Minimum-Cost-to-Split-an-Array.cpp @@ -0,0 +1,29 @@ +class Solution { + int dp[1005]; +public: + int minCost(vector& nums, int k) + { + int 
n = nums.size(); + + for (int i=0; iMap; + int score = 0; + dp[i] = INT_MAX; + for (int j=i; j>=0; j--) + { + Map[nums[j]]++; + if (Map[nums[j]]==2) + score += 2; + else if (Map[nums[j]]>2) + score += 1; + + if (j>=1) + dp[i] = min(dp[i], dp[j-1] + score + k); + else + dp[i] = min(dp[i], score + k); + } + } + return dp[n-1]; + } +}; diff --git a/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array/Readme.md b/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array/Readme.md new file mode 100644 index 000000000..fa212038a --- /dev/null +++ b/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array/Readme.md @@ -0,0 +1,3 @@ +### 2547.Minimum-Cost-to-Split-an-Array + +很明显这是一个动态规划。我们令dp[i]表示前i个元素进行分组能够得到的最大值。我们关注截止到i为止最后一个分组的区间,故遍历一个变量j从i往前走一遍,则有dp[i]=dp[j-1]+score[j:i]。注意到移动j的过程中,score[j:i]可以用o(1)的时间得到更新。算法的整体时间复杂度就是o(n^2)。 diff --git a/Dynamic_Programming/2572.Count-the-Number-of-Square-Free-Subsets/2572.Count-the-Number-of-Square-Free-Subsets.cpp b/Dynamic_Programming/2572.Count-the-Number-of-Square-Free-Subsets/2572.Count-the-Number-of-Square-Free-Subsets.cpp new file mode 100644 index 000000000..648ae675d --- /dev/null +++ b/Dynamic_Programming/2572.Count-the-Number-of-Square-Free-Subsets/2572.Count-the-Number-of-Square-Free-Subsets.cpp @@ -0,0 +1,52 @@ +using LL = long long; +class Solution { + LL dp[1005][1025]; + vectorprimes = {2,3,5,7,11,13,17,19,23,29}; + LL M = 1e9+7; +public: + int squareFreeSubsets(vector& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + + LL ret = 0; + dp[0][0] = 1; + for (int i=1; i<=n; i++) + for (int state = 0; state < (1<<10); state++) + { + if (nums[i]==1) + { + dp[i][state] = dp[i-1][state] * 2 % M; + } + else + { + dp[i][state] = dp[i-1][state]; + int s = helper(nums[i]); + if (s!=-1 && (state&s)==s) + dp[i][state] = (dp[i][state] + dp[i-1][state-s]) % M; + } + if (i==n) + ret = (ret + dp[i][state]) % M; + } + return (ret+M-1)%M; + } + + int helper(int x) + { + int s = 0; + for (int i=0; i 1) + return -1; + else if (count==1) + s += (1<>& types) + { + int n = types.size(); + types.insert(types.begin(), {0,0}); + dp[0][0] = 1; + for (int i=1; i<=n; i++) + for (int j=0; j<=target; j++) + { + for (int k=0; k<=types[i][0]; k++) + { + if (k*types[i][1]>j) break; + dp[i][j] += dp[i-1][j- k*types[i][1]]; + dp[i][j] %= M; + } + } + return dp[n][target]; + } +}; diff --git a/Dynamic_Programming/2585.Number-of-Ways-to-Earn-Points/Readme.md b/Dynamic_Programming/2585.Number-of-Ways-to-Earn-Points/Readme.md new file mode 100644 index 000000000..1b6298c6a --- /dev/null +++ b/Dynamic_Programming/2585.Number-of-Ways-to-Earn-Points/Readme.md @@ -0,0 +1,15 @@ +### 2585.Number-of-Ways-to-Earn-Points + +非常常规的背包DP。将第二个下标设计为已经取得的分数。令dp[i][j]表示前i种题目里恰好取得j分的方案数。对于每种题目类型,我们尝试取不同的数目k。所以总共三层循环。比如,当第i种题目取k道题时,那么方案就取决于前i-1中题目里取`j- k*types[i][1]`分的方案数。 +```cpp +for (int i=1; i<=n; i++) + for (int j=0; j<=target; j++) + { + for (int k=0; k<=types[i][0]; k++) + { + if (k*types[i][1]>j) break; + dp[i][j] += dp[i-1][j- k*types[i][1]]; + dp[i][j] %= M; + } + } +``` diff --git a/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets/2638.Count-the-Number-of-K-Free-Subsets.cpp b/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets/2638.Count-the-Number-of-K-Free-Subsets.cpp new file mode 100644 index 000000000..368b6575f --- /dev/null +++ b/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets/2638.Count-the-Number-of-K-Free-Subsets.cpp @@ -0,0 +1,36 @@ +class Solution { +public: + long long countTheNumOfKFreeSubsets(vector& nums, int k) + { + 
vector>arr(k); + for (int x: nums) + arr[x%k].push_back(x); + + long long ret = 1; + for (int i=0; i& nums, int k) + { + sort(nums.begin(), nums.end()); + long long take = 0, no_take = 1; + for (int i=0; i=1 && nums[i] == nums[i-1]+k) + { + take = no_take_temp; + no_take = take_temp + no_take_temp; + } + else + { + take = take_temp + no_take_temp; + no_take = take_temp + no_take_temp; + } + } + return take + no_take; + } +}; diff --git a/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets/Readme.md b/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets/Readme.md new file mode 100644 index 000000000..6b7d53111 --- /dev/null +++ b/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets/Readme.md @@ -0,0 +1,7 @@ +### 2638.Count-the-Number-of-K-Free-Subsets + +此题和2597一模一样。将所有元素按照对k的模分组。 + +对于每组里的元素进行排序后,可以取任意的组合,但是相邻两个元素如果相差为k的话就不能同时取。这就是一个典型的house robber。 + +对于不同的组,彼此的取法互不影响,所以是乘法关系。 diff --git a/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix.cpp b/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix.cpp new file mode 100644 index 000000000..ab794e709 --- /dev/null +++ b/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix.cpp @@ -0,0 +1,42 @@ +using AI3 = array; +class Solution { +public: + int maxIncreasingCells(vector>& mat) + { + int m = mat.size(), n = mat[0].size(); + vectornums; + for (int i=0; i> rows(m); + vector> cols(n); + + for (int i=0; isecond + 1); + + iter = cols[j].lower_bound(val); + iter = prev(iter); + len = max(len, iter->second + 1); + + rows[i][val] = max(len, rows[i][val]); + cols[j][val] = max(len, cols[j][val]); + + ret = max(ret, len); + } + + return ret; + } +}; diff --git a/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix/Reamdme.md b/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix/Reamdme.md new file mode 100644 index 000000000..71a10c72e --- /dev/null +++ b/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix/Reamdme.md @@ -0,0 +1,7 @@ +### 2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix + +我们肯定是将所有的元素排序之后逐个处理。对于(i,j)考虑以它为结尾的递增序列可以多少长,必然会查看序列里它之前的元素,而前一个元素必然是在同一行或者同一列。所以我们只要在同行同列里查找所有比`mat[i][j]`小的位置(x,y)。以(x,y)为结尾的递增序列可以多少长,那么以(i,j)为结尾的递增序列长度就可以增加1。问题就转化为了递归或者动态规划。 + +接下来的问题是,如果扫描同行同列的所有元素,那么总的时间复杂度是`o(MN*M)`。事实上我们只需要查看同行(或者同列)里元素值恰好比`mat[i][j]`小的位置和对应的序列长度即可。所以我们给每行(以及每列)维护一个key有序的map,比如`rows[i][v] = 3`表示第三行里,以值为v的格子为结尾的递增序列的最大长度是3. 
所以对于(i,j),我们用`prev(rows[i].lower_bound(mat[i][j])`就能定位最后一个恰好比mat[i][j]`小的位置。 + +注意在对所有的rows[i]和cols[j],初始化的时候添加一个`{INT_MIN, 0}`的key-val对,可以避免lower_bound出现越界。 diff --git a/Dynamic_Programming/2742.Painting-the-Walls/2742.Painting-the-Walls_v1.cpp b/Dynamic_Programming/2742.Painting-the-Walls/2742.Painting-the-Walls_v1.cpp new file mode 100644 index 000000000..eb19d2224 --- /dev/null +++ b/Dynamic_Programming/2742.Painting-the-Walls/2742.Painting-the-Walls_v1.cpp @@ -0,0 +1,31 @@ +class Solution { + int dp[505][505*2]; + int OFFSET = 505; +public: + int paintWalls(vector& cost, vector& time) + { + int n = cost.size(); + cost.insert(cost.begin(), 0); + time.insert(time.begin(), 0); + + for (int i=0; i<=n; i++) + for (int j=-n; j<=n; j++) + dp[i][j+OFFSET] = INT_MAX/2; + dp[0][OFFSET] = 0; + + for (int i=0; i& cost, vector& time) + { + int n = cost.size(); + cost.insert(cost.begin(),0); + time.insert(time.begin(),0); + + for (int i=0; i<=n; i++) + for (int j=0; j<=n; j++) + dp[i][j] = INT_MAX/2; + dp[0][0] = 0; + + for (int i=0; i=0`. + +我们遍历j的范围时,只需要从-n到n。这是因为如果j<=-n,说明至少使用了n个小时的免费工人,必然已经把任务完成。如果j>=n,说明至少使用了n个小时的付费工人,根据规则我们必然可以搭配n个小时的免费工人,也必然已经把任务完成了。所以dp计算的二维循环的时间复杂度是o(n^2). + +注意,本题的转移方程是“从现在到未来的形式”。即已知dp[i][j],我们考虑第i+1个任务时,根据付费还是免费工人两种方案,给未来的两个状态提供优化: +```cpp +dp[i+1][j-1] = min(dp[i+1][j-1], dp[i][j]); +dp[i+1][j+time[i+1]] = min(dp[i+1][j+time[i+1]], dp[i][j+OFFSET]+cost[i+1]); +``` +并且我们要注意`j+time[i+1]`可能会大于n,我们要取cap。这也是我们无法用“从现在到未来的形式”的原因,因为我们无法穷举`dp[i][n]=...`的来源。 + +#### 解法2: +此题还有另外一种巧解。我们将每个付费工人强制捆绑若干个免费工人,即看做可以花cost[i]的代价实现time[i]+1的任务。问至少(不是恰好)实现n个任务的最小代价。这是因为“强制捆绑若干个免费工人”的做法无法保证总完成的任务恰好n,极有可能超过n,如果那种情况发生,我们可以再任意踢掉免费的工人(将完成任务的数量降到n)。 + +此时我们定义状态dp[i][j]表示前i个工人(不一定都用)完成j个任务的最小代价。那么这就是一个典型的背包问题。 + +同理,我们也得用“从现在到未来的形式”,即已知dp[i][j],我们考虑第i+1个工人,根据是否雇佣他两种方案,给未来的两个状态提供优化: +```cpp +dp[i+1][j+time[i+1]+1] = min(dp[i+1][j+time[i+1]+1], dp[i][j]+cost[i+1]); +dp[i+1][j] = min(dp[i+1][j], dp[i][j]); +``` +同理,我们也要注意`j+time[i+1]+1`必须cap by n。 + +最终返回的答案是dp[n][n]. diff --git a/Dynamic_Programming/2786.Visit-Array-Positions-to-Maximize-Score/2786.Visit-Array-Positions-to-Maximize-Score.cpp b/Dynamic_Programming/2786.Visit-Array-Positions-to-Maximize-Score/2786.Visit-Array-Positions-to-Maximize-Score.cpp new file mode 100644 index 000000000..4db44261b --- /dev/null +++ b/Dynamic_Programming/2786.Visit-Array-Positions-to-Maximize-Score/2786.Visit-Array-Positions-to-Maximize-Score.cpp @@ -0,0 +1,34 @@ +using LL = long long; +class Solution { +public: + long long maxScore(vector& nums, int x) + { + int n = nums.size(); + + vector>dp(n, vector(2,LLONG_MIN/2)); + if (nums[0]%2==0) + dp[0][0] = nums[0]; + else + dp[0][1] = nums[0]; + + for (int i=1; i= num; s--) + { + dp[s] += dp[s-num]; + dp[s] %= M; + } + } + + return dp[n]; + } +}; diff --git a/Dynamic_Programming/2787.Ways-to-Express-an-Integer-as-Sum-of-Powers/Readme.md b/Dynamic_Programming/2787.Ways-to-Express-an-Integer-as-Sum-of-Powers/Readme.md new file mode 100644 index 000000000..d5cd9e540 --- /dev/null +++ b/Dynamic_Programming/2787.Ways-to-Express-an-Integer-as-Sum-of-Powers/Readme.md @@ -0,0 +1,13 @@ +### 2787.Ways-to-Express-an-Integer-as-Sum-of-Powers + +#### 解法1: +令dp[i][j]表示数字i可以分解的方案数目,并且要求分解出的最大的因子不能超过j。 + +如果该分解不包含`j^x`,那么就有`dp[i][j] = dp[i][j-1]`; 如果该分解包含了`j^x`,并且`i>=j^x`,则有`dp[i][j] = dp[i-j^x][j-1]`. 两者之后即是dp[i][j]。 + +最终答案是dp[n][n]. + +#### 解法2: +令dp[i]表示数字i可以分解的方案数目。 + +我们从小到大依次考虑因子1,2,3,...n的使用。当可以使用j^x时,所有的dp数列可以更新:`dp_new[i] = dp_old[i] + dp_old[i-j^x]`. 这样刷新n遍dp数组,最终的答案是dp[n]. 
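解法1也可以直接写成二维DP。下面是按该思路的一份示意代码(非仓库原版;函数签名假设为 `numberOfWays(n, x)`,即把 n 分解为互不相同的正整数的 x 次幂之和):

```cpp
using LL = long long;
class Solution {
    LL M = 1e9+7;
public:
    int numberOfWays(int n, int x)
    {
        // dp[i][j]: ways to write i as a sum of distinct x-th powers
        // whose bases do not exceed j
        vector<vector<LL>> dp(n+1, vector<LL>(n+1, 0));
        for (int j=0; j<=n; j++) dp[0][j] = 1;

        for (int j=1; j<=n; j++)
        {
            LL p = 1;
            for (int t=0; t<x; t++) p *= j;   // p = j^x
            for (int i=1; i<=n; i++)
            {
                dp[i][j] = dp[i][j-1];                           // do not use j^x
                if (p <= i)
                    dp[i][j] = (dp[i][j] + dp[i-p][j-1]) % M;    // use j^x exactly once
            }
        }
        return dp[n][n];
    }
};
```
简单验证:n=10, x=2 时只有 1^2+3^2 一种分解,上述 dp[10][10] = 1。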
diff --git a/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x.cpp b/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x.cpp new file mode 100644 index 000000000..da9c00f6f --- /dev/null +++ b/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x.cpp @@ -0,0 +1,35 @@ +using PII = pair; +using LL = long long; +class Solution { + LL dp[1005][1005]; + LL presum[1005]; + int n; +public: + int minimumTime(vector& nums1, vector& nums2, int x) + { + n = nums1.size(); + + vectorarr; + for (int i=0; i=1) dp[i][j] = min(dp[i][j], dp[i-1][j-1] + presum[i-1]); + } + + for (int t=0; t<=n; t++) + if (dp[n][t]<=x) return t; + return -1; + } +}; diff --git a/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x/Readme.md b/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x/Readme.md new file mode 100644 index 000000000..ca83884e3 --- /dev/null +++ b/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x/Readme.md @@ -0,0 +1,28 @@ +### 2809.Minimum-Time-to-Make-Array-Sum-At-Most-x + +首先我们要知道,我们不会给同一个位置的数字重复清零操作,因为后一次清零会完全浪费前一次清零。所以我们最多只会进行n次清零。 + +其次,这道题给人有一种错觉,使用清零次数与达成目标之间存在单调性的关系,即用的清零次数越多,就越容易实现sum<=x的目标。 + +我们先承认这种错觉。那么它会引导我们用二分搜值的思想,即给定清零次数T,我们是否能构造一种方案使得`sum<=x`呢? 我们想象一下,使用了T次清零之后,剩余的sum必然是这种形式 +``` +sum = {0 + nums2[a]*1 + nums2[b]*2 + ... + nums2[c] * (T-1)} + + {nums1[x]+nums2[x]*T + nums1[y]+nums2[y]*T + .... nums1[z]+nums2[z]*T} +``` +也就是说,我们需要将元素分为两部分,前一部分是apply了清零操作,后一部分是没有apply清零操作。显然,对于前一部分,为了使得sum最小,我们会按照nums2的数值倒序排列。对于后一部分,对于顺序没有要求。 + +那么我们该如何将元素进行最优的分割呢?暴力尝试的话需要2^n。有更好的方法吗?其实这可以考虑成01背包问题,我们将所有元素按照nums2升序排序:每个元素有“取”或者“不取”两种决策,求取T个元素时的最小代价。所以我们很容易定义dp[i][j]表示前i个元素里面清零j个元素的最小代价。 +1. 当清零第i个元素时,因为nums2值最大,第i个元素必然是最后一个被清零才合算。说明我们在前i-1个元素依然用了j-1次清零,第j次清零使得nums[i]以0进入代价,同时也让之前的i-1个元素多了一轮“回血”,故增加的代价是nums2[1:i-1]。 +2. 当不清零第i个元素时,说明第i个元素此时经历了j轮回血,故增加的代价是`nums1[i]+nums2[i]*j`。 + +综上我们可以计算出任意的dp[i][j]. 通过`dp[i][T]<=x`就可以判断能否通过T次清零实现目标。 + +但是注意,本题里的单调性是不成立的。例如 +``` +[9,10,10,5,2,4] +[2,4,0,3,3,4] +40 +``` +这组数据。不清零已经符合条件。清零1次,反而结果最小只能是42了。有可能确实没有单调性。 + +事实上,一次DP已经解决所有的问题。我们只需寻找最小的j,使得`dp[i][j]<=x`即是答案。 diff --git a/Dynamic_Programming/2826.Sorting-Three-Groups/2826.Sorting-Three-Groups.cpp b/Dynamic_Programming/2826.Sorting-Three-Groups/2826.Sorting-Three-Groups.cpp new file mode 100644 index 000000000..f3220d39a --- /dev/null +++ b/Dynamic_Programming/2826.Sorting-Three-Groups/2826.Sorting-Three-Groups.cpp @@ -0,0 +1,18 @@ +class Solution { + int dp[105][4]; +public: + int minimumOperations(vector& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + for (int i=1; i<=n; i++) + { + dp[i][1] = dp[i-1][1] + (nums[i]!=1); + dp[i][2] = min(dp[i-1][1], dp[i-1][2]) + (nums[i]!=2); + dp[i][3] = min(min(dp[i-1][1], dp[i-1][2]), dp[i-1][3]) + (nums[i]!=3); + } + + return min(min(dp[n][1], dp[n][2]), dp[n][3]); + + } +}; diff --git a/Dynamic_Programming/2826.Sorting-Three-Groups/Readme.md b/Dynamic_Programming/2826.Sorting-Three-Groups/Readme.md new file mode 100644 index 000000000..f2d07f8f1 --- /dev/null +++ b/Dynamic_Programming/2826.Sorting-Three-Groups/Readme.md @@ -0,0 +1,9 @@ +### 2826.Sorting-Three-Groups + +令dp[i][j]表示截止到第i个元素为止构成j个group的最小代价,其中j=1,2,3. 
显然有 +``` +dp[i][1] = dp[i-1][1] + (nums[i]!=1); +dp[i][2] = min(dp[i-1][1], dp[i-1][2]) + (nums[i]!=2); +dp[i][3] = min(min(dp[i-1][1], dp[i-1][2]), dp[i-1][3]) + (nums[i]!=3); +``` +最终返回dp[n][1],dp[n][2],dp[n][3]中的最小值。 diff --git a/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman/2830.Maximize-the-Profit-as-the-Salesman.cpp b/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman/2830.Maximize-the-Profit-as-the-Salesman.cpp new file mode 100644 index 000000000..54286a5f4 --- /dev/null +++ b/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman/2830.Maximize-the-Profit-as-the-Salesman.cpp @@ -0,0 +1,21 @@ +class Solution { + int dp[100005]; +public: + int maximizeTheProfit(int n, vector>& offers) + { + + unordered_map>>Map; + for (auto& offer:offers) + Map[offer[1]+1].push_back({offer[0]+1, offer[2]}); + + for (int i=1; i<=n; i++) + { + dp[i] = dp[i-1]; + for (auto& [start, val]: Map[i]) + dp[i] = max(dp[i], dp[start-1] + val); + } + + return dp[n]; + + } +}; diff --git a/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman/Readme.md b/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman/Readme.md new file mode 100644 index 000000000..8718d73d7 --- /dev/null +++ b/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman/Readme.md @@ -0,0 +1,7 @@ +### 2830.Maximize-the-Profit-as-the-Salesman + +此题和`2008.Maximum-Earnings-From-Taxi`几乎一样。考虑到`the number of houses`只有1e5级别,最简单的方法就是令dp[i]前i个房子所能得到的最大收益。 + +我们遍历以i结尾的offer,如果该offer的跨度是从[j,i],价值是v,那么我们就有一种转移的方法`dp[i]=dp[j-1]+val`. 除此之外,如果不考虑任何offer,则有`dp[i]=dp[i-1]`. 我们从中选一个最优解作为dp[i]即可。 + +如果本题里houses的数目是1e9级别,我们就需要进行离散化的处理,将所有offer的右边界组成数组T,排序后进行遍历。对于跨度是[t1,t2]的offer,我们需要用二分法在T中找到最后一个小于等于t1的下标,再进行dp的转移。 diff --git a/Dynamic_Programming/2851.String-Transformation/2851.String-Transformation.cpp b/Dynamic_Programming/2851.String-Transformation/2851.String-Transformation.cpp new file mode 100644 index 000000000..11bf0bf25 --- /dev/null +++ b/Dynamic_Programming/2851.String-Transformation/2851.String-Transformation.cpp @@ -0,0 +1,82 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int numberOfWays(string s, string t, long long k) + { + string ss = s+s; + ss.pop_back(); + int p = strStr(ss,t); + + int n = s.size(); + vector T = {n-p-1, n-p, p, p-1}; + vector Tk = quickMul(T, k); + + if (s==t) + return Tk[3]; // Tk * (0, 1)' + else + return Tk[2]; // Tk * (1, 0)' + } + + vector multiply(vectormat1, vectormat2) + { + // a1 b1 a2 b2 + // c1 d1 c2 d2 + LL a1 = mat1[0], b1 = mat1[1], c1 = mat1[2], d1 = mat1[3]; + LL a2 = mat2[0], b2 = mat2[1], c2 = mat2[2], d2 = mat2[3]; + return {(a1*a2+b1*c2)%M, (a1*b2+b1*d2)%M, (c1*a2+d1*c2)%M, (c1*b2+d1*d2)%M}; + } + + vector quickMul(vectormat, LL N) { + if (N == 0) { + return {1,0,0,1}; + } + vector mat2 = quickMul(mat, N/2); + if (N%2==0) + return multiply(mat2, mat2); + else + return multiply(multiply(mat2, mat2), mat); + } + + int strStr(string haystack, string needle) + { + int count = 0; + + int n = haystack.size(); + int m = needle.size(); + + vector suf = preprocess(needle); + + vectordp(n,0); + dp[0] = (haystack[0]==needle[0]); + if (m==1 && dp[0]==1) + count++; + + for (int i=1; i0 && haystack[i]!=needle[j]) + j = suf[j-1]; + dp[i] = j + (haystack[i]==needle[j]); + if (dp[i]==needle.size()) + count++; + } + return count; + } + + vector preprocess(string s) + { + int n = s.size(); + vectordp(n,0); + for (int i=1; i=1 && s[j]!=s[i]) + { + j = dp[j-1]; + } + dp[i] = j + (s[j]==s[i]); + } + return dp; + } +}; diff --git 
a/Dynamic_Programming/2851.String-Transformation/Readme.md b/Dynamic_Programming/2851.String-Transformation/Readme.md new file mode 100644 index 000000000..73463292b --- /dev/null +++ b/Dynamic_Programming/2851.String-Transformation/Readme.md @@ -0,0 +1,26 @@ +### 2851.String-Transformation + +首先,本题中的操作相当于切牌。无论一次切最后k张牌,都等效于切k次最后一张牌。最终得到的序列依然是原序列的shift而已。我们记s(i)表示以将字符串s调整后、变成以原来第i个元素为首的一个shift、 + +显然,只有对应部分的i,可以使得`s(i)=t`。我们可以先用KMP算法,算出t在`s+s`中能匹配几次。我们就可以记录有p种shift使得`s(i)=t`,其中`p<=n`. + +对于每次操作,我们有n-1次选择(对应不同的shift),那么经过k次操作之后,s(i)的分布是什么呢?我们特别关心上述的p种shift,因为它们对应着我们想要的答案。 + +我们令f[j]表示经过j次操作后不是想要的shift(我们称为未匹配)的操作数目(也就是字串数目),令g[j]表示经过j次操作后恰是想要的shift(称为匹配)的操作数(也就是字串数目)。我们有动态转移方程: +``` +f[j] = (n-p-1)*f[j-1] + (n-p)*g[j-1] +g[j] = p*f[j-1] + (p-1)*g[j-1] +``` +第一行的解释:对于j-1轮不匹配的字串,下一轮有n-p-1种操作依然得到不匹配的字串(因为不能shift成自己)。对于j-1轮已经匹配的字串,下一轮有n-p种操作变成不匹配的字串。同理第二行的解释:对于j-1轮不匹配的字串,下一轮有p种操作变成匹配的字串。对于j-1轮已经匹配的字串,下一轮有p-1种操作依然变成匹配的字串(因为不能shift成自己)。 + +所以我们有状态转移 (f,g)'(j) = T * (f,g)'(j-1),其中转移矩阵 +``` +T = [n-p-1, n-p + p, p-1 ] +``` +所以第k轮操作之后,(f,g)'(k) = T^k * (f,g)'(0). 注意,T^k依然是一个2x2的矩阵。 + +其中如果初始时s==t,那么(f,g)(0) = {0, 1},否则 (f,g)(0) = {1, 0}。 另外`T^k`可以用快速幂的思想,用log(k)的时间计算。最后记得再与初始状态`(f,g)'(0)`相乘。 + +由此我们计算出 (f,g)(k),得到第k轮时变成未匹配字串的数目,以及变成匹配字串的数目(答案)。 + diff --git a/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal/2896.Apply-Operations-to-Make-Two-Strings-Equal_v1.cpp b/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal/2896.Apply-Operations-to-Make-Two-Strings-Equal_v1.cpp new file mode 100644 index 000000000..0829893d3 --- /dev/null +++ b/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal/2896.Apply-Operations-to-Make-Two-Strings-Equal_v1.cpp @@ -0,0 +1,35 @@ +class Solution { +public: + int minOperations(string s1, string s2, int x) + { + int ret = 0; + + vectornums; + for (int i=0; i>dp(n, vector(n, INT_MAX/2)); + for (int i=0; i+1nums; + for (int i=0; i>dp(n+1, vector(n+1, INT_MAX/2)); + dp[0][0] = 0; + + for (int i=1; i<=n; i++) + for (int j=0; j<=1; j++) + { + if (i-2>=0) + dp[i][j] = min(dp[i][j], dp[i-2][j] + (nums[i]-nums[i-1])); + + if (j-1>=0 && j-1<=i-1) + dp[i][j] = min(dp[i][j], dp[i-1][j-1] + x); + + if (j+1<=i-1) + dp[i][j] = min(dp[i][j], dp[i-1][j+1]); + } + + + return dp[n][0]; + } +}; diff --git a/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal/Readme.md b/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal/Readme.md new file mode 100644 index 000000000..72e39e00a --- /dev/null +++ b/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal/Readme.md @@ -0,0 +1,47 @@ +### 2896.Apply-Operations-to-Make-Two-Strings-Equal + +注意到,如果从i开始的、连续操作k次相邻元素的flip(每次代价为1),本质上就是将i和i+k距离k的两个元素flip,其他元素保持不变,代价就是k。 + +所以,我们直接将s1和s2里面元素不同的index拿出来放在nums数组里。于是任务就是:每次在nums里挑两个(未访问过的)元素i与j,代价是nums[j]-nums[i],或者x。问最少花多少代价能将nums全部访问。当然,nums的元素个数必须是偶数,否则无解。 + +我们特别注意到,对于第一种操作,只会发生在nums里的两个相邻元素之间。为什么呢?假设有4个元素`p,...,k,j,i`,其中`k,j,i`是相邻的。如果我们将k与i按照第一种操作配对,代价是nums[i]-nums[k];而将j与[k,i]之外的某个p配对,代价是c(p,j)。我们发现,`nums[i]-nums[k] >= nums[i]-nums[j]`,且`cost(p,j) >= cost(p,k)`,所以有`nums[i]-nums[k]+cost(p,j) >= nums[i]-nums[j]+ cost(p,k)`,也就是说不如将“i与j配对,k与p配对”来的更优。 + +接下来思考整个问题。首先要明确并没有任何贪心的方法。每次如何挑选两个元素,并没有特定的规律,最优解会随着数据的不同有各种不同的表现。我们只能用DP或者搜索的方式来解。 + +### 解法1:o(n^3) +最容易想到的是一个o(N^3)的区间DP。我们想得到区间的最优解dp[i][j],只有两种拆解的方式: +1. 遍历一个中间的分界点k,我们先将[i:k]处理完,再将[k+1:j]处理完,那么dp[i][j]就是这两部分最优代价的和。 +2. 最后一个访问的pair是(i,j),所以dp[i][j] = dp[i+1][j-1] + cost(i,j). + +最终取最优的解作为dp[i][j]. 
大致的代价如下 +```cpp +for (int d = 1; d<=n; d++) { + for (int i=0; i+d-1>arr; + int count0 = 0; +public: + int countSubMultisets(vector& nums, int l, int r) + { + unordered_mapMap; + for (int x: nums) + { + if (x==0) count0++; + else Map[x]++; + } + + for (auto& p:Map) + arr.push_back(p); + + arr.insert(arr.begin(), {0,0}); + + return (helper(r) - helper(l-1) + M) % M; + } + + int helper(int limit) + { + if (limit<0) return 0; + + int n = arr.size() - 1; + + vector>dp(n+1, vector(limit+1, 0)); + + dp[0][0] = 1; + + for (int i=1; i<=n; i++) + { + auto [v, c] = arr[i]; + for (int j=0; j<=limit; j++) + { + dp[i][j] = (jdp(p1*p2+1); + dp[0] = 1; + int ret = 0; + for (int i=1; i<=p1*p2; i++) + { + dp[i] = (i>=p1 && dp[i-p1]) || (i>=p2 && dp[i-p2]); + if (dp[i]==0) ret = max(ret, i); + } + return ret; + } +}; diff --git a/Dynamic_Programming/2979.Most-Expensive-Item-That-Can-Not-Be-Bought/Readme.md b/Dynamic_Programming/2979.Most-Expensive-Item-That-Can-Not-Be-Bought/Readme.md new file mode 100644 index 000000000..ab903b0cb --- /dev/null +++ b/Dynamic_Programming/2979.Most-Expensive-Item-That-Can-Not-Be-Bought/Readme.md @@ -0,0 +1,5 @@ +### 2979.Most-Expensive-Item-That-Can-Not-Be-Bought + +本题是给出两个质数p1和p2,求不能写成p1与p2的线性组合的最大自然数。此题有数学解,就是`p1*p2-p1-p2`. + +事实上此题有常规的DP解法。令dp[i]表示i是否能写成p1和p2的线性组合,则有`dp[i]=dp[i-p1]||dp[i-p2]`。当我们尝试到`i=p1*p2`时即可停止。事实上大于`p1*p2`的自然数必然能写成两者的线性组合。 diff --git a/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I.cpp b/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I.cpp new file mode 100644 index 000000000..42ae27531 --- /dev/null +++ b/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I.cpp @@ -0,0 +1,41 @@ +class Solution { + int dp[1005][1005]; +public: + int maximumProcessableQueries(vector& nums, vector& queries) + { + int n = nums.size(); + int ret = 0; + + dp[0][n-1] = 0; + for (int len = n-1; len >=1; len--) + for (int i=0; i+len-1=0) + { + int t = dp[i-1][j]; + if (t= queries[t]) + dp[i][j] = max(dp[i][j], t + 1); + else + dp[i][j] = max(dp[i][j], t); + } + if (j+1= queries[t]) + dp[i][j] = max(dp[i][j], t + 1); + else + dp[i][j] = max(dp[i][j], t); + } + } + + for (int i=0; i=queries[dp[i][i]]) + ret = max(ret, dp[i][i]+1); + else + ret = max(ret, dp[i][i]); + } + return ret; + } +}; diff --git a/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I/Readme.md b/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I/Readme.md new file mode 100644 index 000000000..6f9636866 --- /dev/null +++ b/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I/Readme.md @@ -0,0 +1,12 @@ +### 3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I + +我们令dp[i][j]表示将nums砍至区间[i:j]时,能够通过多少个queries。显然,dp[i][j]是可以由dp[i-1][j]或dp[i][j+1]转化来的。例如,令`dp[i-1][j]=t`,说明操作至[i-1:j]时已经通过了t个queries,那么如果`nums[i-1]>=queries[t]`的话,就可以再砍去nums[i-1]使得通过`t+1`个queries. 
反之如果`nums[i-1]=queries[t]`,那么意味着nums可以全部被删除(即通过t+1个queries)。在更新最终答案时需要额外处理这种情况。 + diff --git a/Dynamic_Programming/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification.cpp b/Dynamic_Programming/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification.cpp new file mode 100644 index 000000000..1c9611b2a --- /dev/null +++ b/Dynamic_Programming/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification.cpp @@ -0,0 +1,39 @@ +class Solution { + int dp[100005][2]; +public: + int maxSelectedElements(vector& nums) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + + dp[0][0] = 1; + dp[0][1] = 1; + + int ret = 1; + + for (int i=1; i& nums, int k) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + + vector>>dp(n+1, vector>(k+1, vector(2, LLONG_MIN/3))); + + for (int i=0; i<=n; i++) + { + dp[i][0][0] = 0; + } + + for (int i=1; i<=n; i++) + for (int j=1; j<=k; j++) + { + if (j%2==0) + { + dp[i][j][0] = max(dp[i-1][j][0], dp[i-1][j][1]); + dp[i][j][1] = max(dp[i-1][j][1], max(dp[i-1][j-1][0], dp[i-1][j-1][1])) - (LL)nums[i]*(k+1-j); + } + else + { + dp[i][j][0] = max(dp[i-1][j][0], dp[i-1][j][1]); + dp[i][j][1] = max(dp[i-1][j][1], max(dp[i-1][j-1][0], dp[i-1][j-1][1])) + (LL)nums[i]*(k+1-j); + } + } + + return max(dp[n][k][0],dp[n][k][1]); + + } +}; diff --git a/Dynamic_Programming/3077.Maximum-Strength-of-K-Disjoint-Subarrays/Readme.md b/Dynamic_Programming/3077.Maximum-Strength-of-K-Disjoint-Subarrays/Readme.md new file mode 100644 index 000000000..3380b64cc --- /dev/null +++ b/Dynamic_Programming/3077.Maximum-Strength-of-K-Disjoint-Subarrays/Readme.md @@ -0,0 +1,18 @@ +### 3077.Maximum-Strength-of-K-Disjoint-Subarrays + +我们令dp[i][j]表示前i个元素里找出j个subarray的最优解。注意,我们认为k是个常数,即`dp[i][j] = sum[1]*k - sum[2]*(k-1) + ...`,而不是`dp[i][j] = sum[1]*j - sum[2]*(j-1) + ...`. + +显然,我们在考虑dp[i][j]时,会思考对于nums[i]的决策。如果nums[i]不加入任何subarray,那么就有`dp[i][j] = dp[i-1][j]`. 如果nums[i]加入subarray,那么它就是属于sum[j]。但是此时有一个问题,它是加入已有的sum[j]呢,还是自己独创一个sum[j]。前者的话就是`dp[i-1][j]+nums[i]`,后者就是`dp[i-1][j-1]+nums[i]`. 但是注意到,前者要求`dp[i-1][j]`中的sum[j]必须结尾在第i-1个元素,才能将nums[i]顺利接在sum[j]里,而我们的dp定义并没有这个约束。 + +为了解决这个问题,我们重新定义dp,加入第三个维度表示“最后一个subarray是否以当前元素结尾”。即dp[i][j][0]表示前i个元素分成j个subarray,且nums[i]不参与最后一个subarray;类似dp[i][j][1]表示前i个元素分成j个subarray,且nums[i]参与了最后一个subarray。于是我们容易写出新的转移方程。以j是偶数为例,对于dp[i][j][0],由于nums[i]不起作用,完全取决于dp[i-1][j],不用考虑它的第三个维度: +``` +dp[i][j][0] = max(dp[i-1][j][0], dp[i-1][j][1]); +``` +对于dp[i][j][1],我们需要考虑nums[i]是否是接在nums[i-1]后面属于同一个subarray,还是自己新成立一个subarray。如果是前者,我们考虑的前驱状态是dp[i-1][j][1]; 如果是后者,我们考虑的前驱状态是dp[i-1][j-1][x] +``` +dp[i][j][1] = max(dp[i-1][j][1], max(dp[i-1][j-1][0], dp[i-1][j-1][1])) - (LL)nums[i]*(k+1-j); +``` +最终返回的答案是`max(dp[n][k][0], dp[n][k][1])`. 
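For reference, here is a condensed, self-contained sketch of the recurrence just described (illustrative only; it mirrors the committed solution above but folds the odd/even cases of j into one signed weight and uses 0-indexed input):
```cpp
#include <vector>
#include <array>
#include <algorithm>
#include <climits>
using namespace std;
using LL = long long;

// Sketch of the dp described above. dp[i][j][0/1]: best strength over the first i
// elements with j subarrays chosen; the flag says whether nums[i-1] belongs to the
// j-th subarray. Inside subarray j, nums[i-1] contributes with weight +-(k+1-j).
LL maxStrengthSketch(vector<int>& nums, int k) {
    int n = nums.size();
    array<LL,2> NEG = {LLONG_MIN/3, LLONG_MIN/3};
    vector<vector<array<LL,2>>> dp(n+1, vector<array<LL,2>>(k+1, NEG));
    for (int i = 0; i <= n; i++) dp[i][0][0] = 0;          // zero subarrays cost nothing
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= k; j++) {
            LL w = (LL)nums[i-1] * (k + 1 - j) * (j % 2 == 1 ? 1 : -1);
            dp[i][j][0] = max(dp[i-1][j][0], dp[i-1][j][1]);              // nums[i-1] unused
            dp[i][j][1] = max(dp[i-1][j][1],                              // extend subarray j
                              max(dp[i-1][j-1][0], dp[i-1][j-1][1])) + w; // or open subarray j here
        }
    return max(dp[n][k][0], dp[n][k][1]);
}
```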
+ +初始状态是对于所有的dp[i][0][0]赋值为零,其他都设为负无穷大。 + diff --git a/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences/3082.Find-the-Sum-of-the-Power-of-All-Subsequences.cpp b/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences/3082.Find-the-Sum-of-the-Power-of-All-Subsequences.cpp new file mode 100644 index 000000000..60b9dccc8 --- /dev/null +++ b/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences/3082.Find-the-Sum-of-the-Power-of-All-Subsequences.cpp @@ -0,0 +1,37 @@ +using LL = long long; +class Solution { + LL dp[105][105][105]; + LL M = 1e9+7; +public: + int sumOfPower(vector& nums, int k) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + + dp[0][0][0] = 1; + + for (int i=1; i<=n; i++) + for (int s=0; s<=k; s++) + for (int j=0; j<=i; j++) + { + dp[i][s][j] = dp[i-1][s][j]; + if (s>=nums[i] && j>0) + dp[i][s][j] += dp[i-1][s-nums[i]][j-1]; + dp[i][s][j] %= M; + } + + vectorpower(10005); + power[0] = 1; + for (int i=1; i<=n; i++) + power[i] = power[i-1]*2%M; + + LL ret = 0; + for (int j=1; j<=n; j++) + { + LL t = dp[n][k][j]; + ret = (ret + t*power[n-j]%M) % M; + } + + return ret; + } +}; diff --git a/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences/Readme.md b/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences/Readme.md new file mode 100644 index 000000000..6f27c5469 --- /dev/null +++ b/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences/Readme.md @@ -0,0 +1,12 @@ +### 3082.Find-the-Sum-of-the-Power-of-All-Subsequences + +“子序列的子序列”思考起来比较费劲,但是如果只是求“和为k的子序列的个数”,这个看上去就是典型的DP。然后我们再思考一下,本题其实就是求每个“和为k的子序列”有多少个超序列(super sequence)。 + +举个列子,假设总元素个数是n。如果有一个子序列q的长度是m,它的和是k,那么nums里就有`(n-m)^2`个序列包含q,这些序列的都有这么一个子序列q满足条件,所以q本质上给最终答案贡献了`(n-m)^2`。 + +所以我们只需要求出所有不同长度的、和为k的子序列个数。这个只不过在前述DP的基础上,再增加一个变量/下标记录已经选取元素的个数。即令dp[i][s][j]表示在前i个元素里、选取j个元素、和为s的子序列有多少个。显然它的转移方程就取决于第i个元素是否选取: +```cpp +dp[i][s][j] += dp[i-1][s][j]; // no select nums[i] +dp[i][s][j] += dp[i-1][s-nums[i]][j-1], if (s>=nums[i] && j>=1); // select nums[i] +``` +最终考察完整个nums之后,我们遍历子序列的长度j,就可以知道存在有dp[n][k][j]个符合要求的子序列,并且其长度是j。那么它的超序列就有`2^(n-j)`个。 diff --git a/Dynamic_Programming/3098.Find-the-Sum-of-Subsequence-Powers/3098.Find-the-Sum-of-Subsequence-Powers.cpp b/Dynamic_Programming/3098.Find-the-Sum-of-Subsequence-Powers/3098.Find-the-Sum-of-Subsequence-Powers.cpp new file mode 100644 index 000000000..3360df7b3 --- /dev/null +++ b/Dynamic_Programming/3098.Find-the-Sum-of-Subsequence-Powers/3098.Find-the-Sum-of-Subsequence-Powers.cpp @@ -0,0 +1,55 @@ +using LL = long long; +class Solution { + LL M = 1e9+7; + int n; +public: + int sumOfPowers(vector& nums, int K) + { + n = nums.size(); + sort(nums.begin(), nums.end()); + nums.insert(nums.begin(), 0); + + LL ret = 0; + for (int i=1; i<=n; i++) + for (int j=i+1; j<=n; j++) + { + int d = nums[j]-nums[i]; + ret = (ret + helper(nums, K, d, i, j)) % M; + } + return ret; + } + + LL helper(vector& nums, int K, int d, int a, int b) + { + vector>dp1(n+2, vector(n+2)); + vector>dp2(n+2, vector(n+2)); + + for (int i=1; i<=n; i++) + { + dp1[i][1] = 1; + dp2[i][1] = 1; + } + + for (int i=1; i<=a; i++) + for (int j=2; j<=K; j++) + { + for (int k=1; nums[i]-nums[k]>d && k=b; i--) + for (int j=2; j<=K; j++) + { + for (int k=n; nums[k]-nums[i]>=d && k>i; k--) + dp2[i][j] = (dp2[i][j] + dp2[k][j-1]) % M; + } + + LL ret = 0; + for (int t=1; td`,就有`dp1[i][j] += dp1[k][j-1]`,将所有符合条件的j遍历一遍,就可以求出dp[i][j]. 
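A minimal sketch of this forward pass, assuming the names used above (`nums` sorted ascending and 1-indexed with a dummy `nums[0]`, pair endpoint `a`, target length `K`, gap `d`, modulus `M`); the committed helper in the .cpp above bundles both passes together:
```cpp
#include <vector>
using namespace std;
using LL = long long;

// dp1[i][j]: number of subsequences inside nums[1..a] that end at index i, have
// length j, and whose adjacent differences are all strictly greater than d.
vector<vector<LL>> buildDp1(const vector<int>& nums, int a, int K, int d, LL M) {
    int n = nums.size() - 1;                          // nums[0] is a dummy
    vector<vector<LL>> dp1(n + 1, vector<LL>(K + 1, 0));
    for (int i = 1; i <= a; i++) dp1[i][1] = 1;       // any single element qualifies
    for (int i = 1; i <= a; i++)
        for (int j = 2; j <= K; j++)
            for (int k = 1; k < i; k++)
                if (nums[i] - nums[k] > d)            // append nums[i] after nums[k]
                    dp1[i][j] = (dp1[i][j] + dp1[k][j-1]) % M;
    return dp1;
}
```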
+ +同理,我们从后往前进行DP,求出在[b,n]区间里有多少相邻元素之差大于d的子序列。可以求解dp2[i][j]表示以i开头的、长度为j、且相邻元素跨度大于等于d的子序列个数。 + +这样,我们只需要将期望长度K分配给[a,b]的前后两段,假设分别是t和K-t,就可以得到组合数`dp[a][t]*dp[b][K-t]`,对应的就是包含a和b的、符合条件的子序列的个数。我们对于所有t=1,2,...K-1,将组合数求和即可。 + +特别注意,dp1和dp2的定义略有不同,前者要求跨度大于d,后者要求跨度大于等于d。这是因为一个子序列里可能有多个最小跨度d,我们约定只认为第一个出现的最小跨度d是我们的枚举对象。所以在[1,a]区间内,我们不接受相邻元素跨度恰好为d的情况。 diff --git a/Dynamic_Programming/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions.cpp b/Dynamic_Programming/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions.cpp new file mode 100644 index 000000000..c3cff6617 --- /dev/null +++ b/Dynamic_Programming/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions.cpp @@ -0,0 +1,36 @@ +class Solution { + int dp[1005][10]; +public: + int minimumOperations(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + + for (int i=0; i PII; +using LL = long long; +typedef pair PII; class Solution { public: int nthSuperUglyNumber(int n, vector& primes) { - vectorp(primes.size(),0); - - vectorrets({1}); - priority_queue, greater<>>pq; - for (int i=0; irets({1}); + vectorp(k, 0); + + priority_queue, greater<>>pq; + for (int i=0; i=0) dp0[i][j] += dp1[i-k][j]; + if (j-k>=0) dp1[i][j] += dp0[i][j-k]; + dp0[i][j] %= M; + dp1[i][j] %= M; + } + } + + return (dp0[zero][one]+dp1[zero][one]) % M; + } +}; diff --git a/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II/3130.Find-All-Possible-Stable-Binary-Arrays-II_v2.cpp b/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II/3130.Find-All-Possible-Stable-Binary-Arrays-II_v2.cpp new file mode 100644 index 000000000..3face3ad0 --- /dev/null +++ b/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II/3130.Find-All-Possible-Stable-Binary-Arrays-II_v2.cpp @@ -0,0 +1,40 @@ +using LL = long long; +class Solution { + LL dp0[205][205]; + LL dp1[205][205]; + LL presum0[205][205]; + LL presum1[205][205]; + LL M = 1e9+7; +public: + int numberOfStableArrays(int zero, int one, int limit) + { + dp0[0][0]=1; + dp1[0][0]=1; + presum0[0][0] = 1; + presum1[0][0] = 1; + + for (int i=0; i<=zero; i++) + for (int j=0; j<=one; j++) + { + if (i==0 && j==0) continue; + + // 1<=k<=min(i,limit) + dp0[i][j] = (i-1<0?0:presum1[j][i-1]) - (i-min(i,limit)-1<0?0:presum1[j][i-min(i,limit)-1]); + + + // 1<=k<=min(j,limit) + dp1[i][j] = (j-1<0?0:presum0[i][j-1]) - (j-min(j,limit)-1<0?0:presum0[i][j-min(j,limit)-1]); + + dp0[i][j] = (dp0[i][j] + M) %M; + dp1[i][j] = (dp1[i][j] + M) %M; + + presum0[i][j] = (j<1?0:presum0[i][j-1]) + dp0[i][j]; + presum1[j][i] = (i<1?0:presum1[j][i-1]) + dp1[i][j]; + + presum0[i][j] %= M; + presum1[j][i] %= M; + } + + return (dp0[zero][one]+dp1[zero][one]) % M; + } +}; diff --git a/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II/Readme.md b/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II/Readme.md new file mode 100644 index 000000000..69c9a9c37 --- /dev/null +++ b/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II/Readme.md @@ -0,0 +1,40 @@ +### 3130.Find-All-Possible-Stable-Binary-Arrays-II + +#### 解法1: +对于每一步决策而言,我们需要考虑的因素无非就是:已经用了几个0,已经用了几个1,当前最后一步是0还是1. 
事实上,我们就用这些状态作为变量,即可定义动态规划。令dp0[i][j]表示已经用了i个0、j个1,并且最后一个数字填写的是0时,可以构造的stable binary array的个数。类似地,令dp1[i][j]表示已经用了i个0、j个1,并且最后一个数字填写的是1时,可以构造的stable binary array的个数。 + +如何计算dp0[i][j]呢?因为最后一步填0,且唯一的限制就是不能有连续超过limit+1个0,所以它之前最后一次出现的1,必须在`i+j-limit, i+j-limit+1, ..., i+j-1`中间的一处。所以就有 +``` +dp0[i][j] = dp1[i-limit][j] + dp1[i-limit+1][j] + ... + dp1[i-1][j] +``` +同理,dp1[i][j]的前趋状态取决于最后一次出现0的位置, +``` +dp1[i][j] = dp0[i][j-limit] + dp1[i][j-limit+1] + ... + dp1[i][j-1] +``` +综上,我们用三层循环就可以求出dp0和dp1。最终答案就是将所有的0和1用完,但结尾的元素可以是0或1,即`dp0[zero][one]+dp1[zero][one]`. +```cpp +for (int i=0; i<=zero; i++) + for (int j=0; j<=one; j++) + { + for (int k=1; k<=limit; k++) + { + if (i>=k) dp0[i][j] += dp1[i-k][j]; + if (j>=k) dp1[i][j] += dp0[i][j-k]; + } + } +``` + +#### 解法2: +注意到上述解法的最内层循环,其实dp0[i][j]是累加了dp1[...][j]的一段区间,区间范围是[i-min(i,limit), i-1]. 同理,dp1[i][j]是累加了dp0[i][...]的一段区间,区间范围是[j-min(j,limit), j-1]. 为了节省这层循环,我们想到可以用前缀和。 + +令`presum0[i][...]`表示`dp0[i][...]`的前缀和,`presum1[j][...]`表示`dp1[...][j]`的前缀和。于是区间之和就可以表示成前缀和之差: +``` +dp0[i][j] = presum1[j][i-1] - presum1[j][i-min(i,limit)-1] +dp1[i][j] = presum0[i][j-1] - presum0[i][j-min(j,limit)-1] +``` +用完之后,记得将新算出的dp0[i][j]和dp1[i][j]来更新presum0与presum1 +``` +presum0[i][j] = presum0[i][j-1] + dp0[i][j] +presum1[j][i] = presum1[j][i-1] + dp1[i][j] +``` +就这样在i与j的双层循环里,不断滚动更新dp0[i][j]、dp1[i][j]、presum0[i][j]与presum1[j][i]. diff --git a/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II_v1.cpp b/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II_v1.cpp new file mode 100644 index 000000000..62a19d259 --- /dev/null +++ b/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II_v1.cpp @@ -0,0 +1,31 @@ +class Solution { + int dp[505][26]; +public: + int maximumLength(vector& nums, int k) + { + int n = nums.size(); + + int ret = 1; + + for (int i=0; i=1) + ans = max(ans, dp[j][t-1]+1); + } + + dp[i][t] = ans; + ret = max(ret, ans); + } + } + + return ret; + } +}; diff --git a/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II_v2.cpp b/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II_v2.cpp new file mode 100644 index 000000000..02fc5b52f --- /dev/null +++ b/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II_v2.cpp @@ -0,0 +1,35 @@ +class Solution { + int dp[5005][55]; +public: + int maximumLength(vector& nums, int k) + { + int n = nums.size(); + vector>max_value(55); + vectormax_all(55); + + int ret = 1; + + for (int i=0; i=1) + ans = max(ans, max_all[t-1]+1); + + dp[i][t] = ans; + ret = max(ret, ans); + } + + for (int t=0; t<=k; t++) + { + max_value[t][nums[i]] = max(max_value[t][nums[i]], dp[i][t]); + max_all[t] = max(max_all[t], dp[i][t]); + } + } + + return ret; + } +}; diff --git a/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/Readme.md b/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/Readme.md new file mode 100644 index 000000000..6e055fbf6 --- /dev/null +++ b/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II/Readme.md @@ -0,0 +1,59 @@ +### 3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II + +#### 解法1:For 3176 
+对于常规的DP解法,我们容易设置状态变量dp[i][t]表示前i个元素里、我们已经出现了t次相邻元素不等的情况下,能够得到的goode subsequence的最大长度。 + +显然转移的突破口就在于nums[i]是否与sequence的前一个元素相同。我们枚举j=1) + ans = max(ans, dp[j][t-1]+1); + } + dp[i][t] = ans; + ret = max(ret, ans); + } + } +``` + +#### 解法2: +上述解法的时间复杂度是o(N^2*K)。如何优化呢?事实上我们可以对最内层的j循环优化。首先看else分支,它的本质是在dp[j][t]中取最大值。这个其实我们可以在之前计算dp的过程中顺便维护一下dp[j][t]的最大值即可。即`max_all[t] = max(dp[j][t]) j=0,1,2..i-1`. + +再看if分支,它的本质就是在dp[j][t-1]中、对于那些nums[j]==nums[i]的dp值取最大值。这其实也可以用一个hash,以nums[i]为key,来维护这个最大值。即`max_value[t][v] = max(dp[j][t][v]) j=0,1,2..i-1 and nums[j]==v`. + +有人会说,else分支是只有在nums[j]!=nums[i]时才跑的,所以`max_all[t]`的定义有缺陷,应该剔除掉一些元素。其实我们可以不在意。因为逻辑上一定有`dp[j][t]>dp[j][t-1]`,所以对于那些nums[j]==nums[i]的元素j,走if分支会比走else分支更合算。所以`max_all[t]`不剔除与nums[i]相同的那些dp值也没有关系。 + +于是解法1就可以变成如下。注意我们在t循环计算完dp[i][t]之后,再进行一次循环更新max_valuet[t][nums[i]和max_all[t]. +```cpp + for (int i=0; i& power) + { + mapMap; + for (int x: power) + Map[x]++; + + vector>spell(Map.begin(), Map.end()); + + vectordp(spell.size()); + + for (int i=0; i=1) dp[i] = max(dp[i], dp[i-1]); + + // pick the i-th spell along with previous ones + if (i>=1 && p - spell[i-1].first > 2) + dp[i] = max(dp[i], dp[i-1] + p * count); + else if (i>=2 && p - spell[i-2].first > 2) + dp[i] = max(dp[i], dp[i-2] + p * count); + else if (i>=3) + dp[i] = max(dp[i], dp[i-3] + p * count); + } + + return dp[spell.size()-1]; + } +}; diff --git a/Dynamic_Programming/3186.Maximum-Total-Damage-With-Spell-Casting/Readme.md b/Dynamic_Programming/3186.Maximum-Total-Damage-With-Spell-Casting/Readme.md new file mode 100644 index 000000000..dc3212c0b --- /dev/null +++ b/Dynamic_Programming/3186.Maximum-Total-Damage-With-Spell-Casting/Readme.md @@ -0,0 +1,9 @@ +### 3186.Maximum-Total-Damage-With-Spell-Casting + +这非常类似一个house robber的问题。我们将所有的spell按照power的distinct value按从小到大排序,定义dp[i]表示从前i件spell里选取所能构成的最大和,其中题意要求不能选取power值差距在2以内的spell。特别注意,dp[i]不一定要求必须取第i种spell。 + +当我们考察spell[i]的时候,我们可以不取第i种spell,这样的话就是dp[i]=dp[i-1]. + +如果取第i种spell,那么保底就是仅取第i种药水的收益。其次我们查看spell[i-1]是否与spell[i]的差值在2之外,如果是的话,那么dp[i]就可以在dp[i-1]的基础上加上所有属于spell[i]的power。如果不是的话,我们往前查看spell[i-2]与spell[i]的差值是否在2之外,如果是的话,那么dp[i]就可以在dp[i-2]的基础上加上所有属于spell[i]的power。如果再不是的话,那么dp[i]可以直接在dp[i-3]的基础上加上所有属于spell[i]的power,这是因为spell数值彼此不同,spell[i-3]和spell[i]的差值必然大于2. 
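As a compact illustration of this house-robber style recurrence, here is a hedged, self-contained sketch (it first aggregates the total damage per distinct power value; the committed file above is the reference implementation):
```cpp
#include <vector>
#include <map>
#include <algorithm>
using namespace std;
using LL = long long;

LL maxDamageSketch(vector<int>& power) {
    map<int, LL> total;                                // total damage per distinct power value
    for (int x : power) total[x] += x;
    vector<pair<int, LL>> spell(total.begin(), total.end());
    int m = spell.size();
    if (m == 0) return 0;
    vector<LL> dp(m, 0);
    for (int i = 0; i < m; i++) {
        LL take = spell[i].second;                     // cast every spell of this value
        int j = i - 1;                                 // last value whose gap to spell[i] exceeds 2
        while (j >= 0 && spell[i].first - spell[j].first <= 2) j--;
        if (j >= 0) take += dp[j];
        dp[i] = max(take, i >= 1 ? dp[i-1] : 0);       // take this value, or skip it
    }
    return dp[m-1];
}
```
Because the values in `spell` are distinct, the inner `while` backs up at most two positions, which matches the dp[i-1]/dp[i-2]/dp[i-3] case analysis above.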
+ +最终答案返回dp[n-1]即可。 diff --git a/Dynamic_Programming/3193.Count-the-Number-of-Inversions/3193.Count-the-Number-of-Inversions.cpp b/Dynamic_Programming/3193.Count-the-Number-of-Inversions/3193.Count-the-Number-of-Inversions.cpp new file mode 100644 index 000000000..c07e9f516 --- /dev/null +++ b/Dynamic_Programming/3193.Count-the-Number-of-Inversions/3193.Count-the-Number-of-Inversions.cpp @@ -0,0 +1,54 @@ +using LL = long long; +class Solution { + LL dp[305][405]; + LL M = 1e9+7; +public: + int numberOfPermutations(int n, vector>& requirements) + { + dp[0][0] = 1; + + mapMap; + for (auto req: requirements) + { + int end = req[0] + 1; + int cnt = req[1]; + Map[end] = cnt; + } + + int cur = 0; + for (int i=1; i<=n; i++) + { + if (Map.find(i)!=Map.end()) + cur = Map[i]; + + auto iter = Map.lower_bound(i); + LL limit = iter->second; + for (int j=cur; j<=limit; j++) + { + for (int k=0; k<=j; k++) + { + if (j-k<=i-1) + { + dp[i][j] += dp[i-1][k]; + dp[i][j] %= M; + } + } + } + + if (Map.upper_bound(i)==Map.end()) + { + LL ret = dp[i][cur]; + return ret * fact(n-i) % M; + } + } + return 0; + } + + LL fact(LL n) + { + LL ret = 1; + for (int i=1; i<=n; i++) + ret = ret * i % M; + return ret; + } +}; diff --git a/Dynamic_Programming/3193.Count-the-Number-of-Inversions/Readme.md b/Dynamic_Programming/3193.Count-the-Number-of-Inversions/Readme.md new file mode 100644 index 000000000..d0c4a09ff --- /dev/null +++ b/Dynamic_Programming/3193.Count-the-Number-of-Inversions/Readme.md @@ -0,0 +1,22 @@ +### 3193.Count-the-Number-of-Inversions + +对于permutation类型的DP题有着类似的套路。其核心就是,任意一个长度为n的permutation的前i个元素,可以一一对应于一个长度为i的permutation。所以对于长度为n的permutation做动态规划时,对于状态变量dp[i](长度为n的permutation的前i个元素组成的、符合条件的序列个数),都可以等效看做是长度为i的(即1-i组成的)、符合条件的permutation个数。 + +因为本题涉及逆序对的数目,并且题目数据量给出的逆序对数目不超过400,故我们可以将其作为状态变量的一个下标。即我们定义dp[i][j]表示1-i组成的permutation里、逆序对数目是j的排列的个数。 + +对于前i个元素组成的permutation,能允许有多少逆序对呢?这取决于requirements给出的约束。举个例子,如果requirement要求在第p位有a个逆序对,在第q位有b个逆序对,并且恰好有`p<=i<=q`(即p和q是i最贴近的两个约束点),那么对于dp[i][j]而言,j的取值就是`a<=j<=b`. + +如何求解dp[i][j]呢?我们需要寻找它与前驱状态dp[i-1][k]的关系。注意到相对于dp[i-1][],我们在permutation中引入了新元素i,如果将i放在排列的最后,那么它不会引入任何新的逆序对。如果将i放在排列的倒数第二个位置,那么会引入一个逆序对... 依次类推,如果前驱状态dp[i-1][k]已经有k个逆序对,那么相对于j而言,我们需要再引入`j-k`个逆序对。这能否实现呢?其实只需要满足在i-1的排列中至少有j-k个元素即可,即`j-k<=i-1`。故 +```cpp +for (int i=1; i<=n; i++) +{ + int a = ... , b = ... 
; + for (int j=a; j<=b; j++) + for (int k=0; k<=j; k++) + { + if (j-k <= i-1) + dp[i][j] += dp[i-1][k]; + } +} +``` +注意到,当处理完requirement的最后一个约束位置i后,此后上限b就不存在了。此时意味还有`t = n-i`个元素没有加入排列。注意这t个元素不能加入前i个元素组成的排列里,否则会违反在i处的约束;但是这t个元素本身可以在排列之后任意混排,不影响之前的requirement。故最终的答案就是`dp[i][r]*t!`,其中r表示requirement在i处的逆序对数目要求。 diff --git a/Dynamic_Programming/3196.Maximize-Total-Cost-of-Alternating-Subarrays/3196.Maximize-Total-Cost-of-Alternating-Subarrays.cpp b/Dynamic_Programming/3196.Maximize-Total-Cost-of-Alternating-Subarrays/3196.Maximize-Total-Cost-of-Alternating-Subarrays.cpp new file mode 100644 index 000000000..7b7ac7b8d --- /dev/null +++ b/Dynamic_Programming/3196.Maximize-Total-Cost-of-Alternating-Subarrays/3196.Maximize-Total-Cost-of-Alternating-Subarrays.cpp @@ -0,0 +1,20 @@ +using LL = long long; +class Solution { + LL dp[100005][2]; +public: + long long maximumTotalCost(vector& nums) + { + int n = nums.size(); + + dp[0][1] = nums[0]; + dp[0][0] = LLONG_MIN/2; + + for (int i=1; i& nums, int k) + { + vectorlast(k, -1); + int ret = 0; + + int n = nums.size(); + for (int i=0; ifreq(26); + for (char c : s) { + freq[c-'a']++; + } + int max_freq = *max_element(freq.begin(), freq.end()); + + int ret = INT_MAX/2; + vectordiff(26); + + for (int target = 1; target <= max_freq; target++) + { + for (int i=0; i<26; i++) + diff[i] = freq[i] - target; + vector>dp(26, vector(2, INT_MAX/2)); + + int carry; + dp[0][0] = freq[0]; + dp[0][1] = abs(diff[0]); + + for (int i=1; i<26; i++) + { + dp[i][0] = min(dp[i-1][0], dp[i-1][1]) + freq[i]; + + dp[i][1] = min(dp[i-1][0], dp[i-1][1]) + abs(diff[i]); + + if (i>=1 && diff[i-1]>0 && diff[i]<0) + { + int common = min(abs(diff[i-1]), abs(diff[i])); + dp[i][1] = min(dp[i][1], dp[i-1][1] + abs(diff[i])-common); + } + + if (i>=1 && freq[i-1]>0 && diff[i]<0) + { + int common = min(abs(freq[i-1]), abs(diff[i])); + dp[i][1] = min(dp[i][1], dp[i-1][0] + abs(diff[i])-common); + } + } + + ret = min(ret, min(dp[25][0], dp[25][1])); + } + + return ret; + } +}; diff --git a/Dynamic_Programming/3389.Minimum-Operations-to-Make-Character-Frequencies-Equal/Readme.md b/Dynamic_Programming/3389.Minimum-Operations-to-Make-Character-Frequencies-Equal/Readme.md new file mode 100644 index 000000000..c645d91c0 --- /dev/null +++ b/Dynamic_Programming/3389.Minimum-Operations-to-Make-Character-Frequencies-Equal/Readme.md @@ -0,0 +1,20 @@ +### 3389.Minimum-Operations-to-Make-Character-Frequencies-Equal + +此题的突破口是对所有可能的频率进行尝试,暴力地从1枚举到max_freq(对应出现频次最多的字母),然后考察是否有一种方法能够将所有字符的频次都变换成target。 + +对于题目中的规则,我们最需要深刻体会的就是第三条。事实上,我们不会将某个字符连续变换两次。比如说,a->b->c,那这两次变换,还不如直接删除a,添加c来得直观。所以唯一使用规则三的情景就是:对于字母表相邻的两种字符x和y,如果x需要删除一些,y需要增加一些,那么我们不妨将部分的x转化为y,以节省操作。更具体的,如果x需要删除a个,y需要增加b个,普通的“删除+增加”的操作需要a+b次,但是如果将c=min(a,b)个x转化为y次,我们就额外节省了c次操作。 + +此题的复杂之处在于,即使我们确定了某个目标频次target,但规则同时也允许部分字符的频次变成零。对于这两个不同的“做法”,需要考虑的策略其实也是不同的。因此我们对于a->z的的每个字符,都要考虑到它的两种“做法”。 + +假设我们考虑相邻的两个字符i-1和字符i。令`diff[i]=freq[i]-target`,正则表示相比于target多了,负表示相比于target还亏欠。定义dp[i][0]表示对字符i采取清零操作的总最小代价;dp[i][1]表示对字符i变换成target频次的总最小代价。 + +1. 如果对字符i采取清零,即dp[i][0],那么所需要的操作数必然是freq[i],与之前的状态无关。故`dp[i][0] = min(dp[i-1][0], dp[i-1][1]) + freq[i]`; + +2. 
如果对字符i变换成目标targt,那么我们分两种情况: + * 不采用规则3,只靠增删,那么同理有`dp[i][1] = min(dp[i-1][0], dp[i-1][1]) + abs(diff[i])` + * 采用规则3,同时前一个字符的操作是通过删减达到清零,这就意味着a=freq[i-1],b=abs(diff[i]),故c=min(a,b),且有`dp[i][1] = dp[i-1][0]+abs(diff[i])-c` + * 采用规则3,同时前一个字符的操作是通过删减达到target,这就意味着a=diff[i-1],b=abs(diff[i]),故c=min(a,b),且有`dp[i][1] = dp[i-1][0]+abs(diff[i])-c` + +最终对于target而言,最优解就是遍历完26个字母后的`min(dp[25][0],dp[25][1])` + +全局最取遍历所有target之后的最优解。 diff --git a/Dynamic_Programming/3444.Minimum-Increments-for-Target-Multiples-in-an-Array/3444.Minimum-Increments-for-Target-Multiples-in-an-Array.cpp b/Dynamic_Programming/3444.Minimum-Increments-for-Target-Multiples-in-an-Array/3444.Minimum-Increments-for-Target-Multiples-in-an-Array.cpp new file mode 100644 index 000000000..86b5d5a9f --- /dev/null +++ b/Dynamic_Programming/3444.Minimum-Increments-for-Target-Multiples-in-an-Array/3444.Minimum-Increments-for-Target-Multiples-in-an-Array.cpp @@ -0,0 +1,33 @@ +class Solution { + int dp[50005][1<<4]; +public: + int minimumIncrements(vector& nums, vector& target) + { + int n = nums.size(); + int m = target.size(); + nums.insert(nums.begin(), 0); + + for (int state = 0; state<(1<0; subset=(subset-1)&state) + { + long long L = 1; + for (int j=0; j>j)&1) + L = lcm(L, target[j]); + } + long long cost = (nums[i] % L == 0) ? 0 : (L - nums[i]%L); + dp[i][state] = min((long long)dp[i][state], (long long)dp[i-1][state-subset] + cost); + } + } + + return dp[n][(1<& nums) + { + for (int i=0; i& nums, vector>& queries) + { + for (int i=0; i=0; v--) + if (v>=d && dp[i][v-d] == true) + dp[i][v] = true; + } + if (isOK(nums)) return k+1; + } + return -1; + } +}; diff --git a/Dynamic_Programming/3489.Zero-Array-Transformation-IV/Readme.md b/Dynamic_Programming/3489.Zero-Array-Transformation-IV/Readme.md new file mode 100644 index 000000000..476ca9320 --- /dev/null +++ b/Dynamic_Programming/3489.Zero-Array-Transformation-IV/Readme.md @@ -0,0 +1,17 @@ +### 3489.Zero-Array-Transformation-IV + +这本质是一个背包问题。每处理一个query,在对应区间内的nums[i]就多得了一次删减的操作。 + +我们需要查看这些nums[i]在获得这个额外的删减机会之后,是否能连同之前的删减操作,实现置零?很显然,如果该query能够让nums[i]再削减d,那么就取决于nums[i]之前能否削减至d。 + +我们令dp[i][v]表示如果nums[i]的数值是v,能否最终削减成为零。就有 +``` +for q: queries + a = q[0], b = q[1], d = q[2]; + for (i=a; i=b; i++) { + for (int v=0; v<=1000; v++) { + dp[i][v] = dp[i][v] || d[i][v-d]; + } + } +``` +最终查看所有的dp[i][nums[i]]是否为true。 diff --git a/Dynamic_Programming/3538.Merge-Operations-for-Minimum-Travel-Time/3538.Merge-Operations-for-Minimum-Travel-Time.cpp b/Dynamic_Programming/3538.Merge-Operations-for-Minimum-Travel-Time/3538.Merge-Operations-for-Minimum-Travel-Time.cpp new file mode 100644 index 000000000..45d1a4ca3 --- /dev/null +++ b/Dynamic_Programming/3538.Merge-Operations-for-Minimum-Travel-Time/3538.Merge-Operations-for-Minimum-Travel-Time.cpp @@ -0,0 +1,30 @@ +const int INF = INT_MAX / 2; +int dp[51][11][101]; + +class Solution { +public: + int minTravelTime(int l, int n, int K, vector& pos, vector& time) { + + fill(&dp[0][0][0], &dp[0][0][0]+51*11*101, INT_MAX/2); + + dp[0][0][time[0]] = 0; + + for (int i=0; idp(n, INT_MAX/2); + + for (int j=0; j findCoins(vector& numWays) { + int n = numWays.size(); + numWays.insert(numWays.begin(), 0); + + vectorrets; + vectordp(n+1); + dp[0] = 1; + for (int c=1; c<=n; c++) { + if (numWays[c] == dp[c]) + continue; + rets.push_back(c); + for (int i=c; i<=n; i++) { + dp[i] += dp[i-c]; + } + } + + for (int i=1; i<=n; i++) { + if (dp[i]!=numWays[i]) + return {}; + } + + return rets; + } +}; diff --git a/Dynamic_Programming/3592.Inverse-Coin-Change/Readme.md 
b/Dynamic_Programming/3592.Inverse-Coin-Change/Readme.md new file mode 100644 index 000000000..46bbd2cf0 --- /dev/null +++ b/Dynamic_Programming/3592.Inverse-Coin-Change/Readme.md @@ -0,0 +1,26 @@ +### 3592.Inverse-Coin-Change + +这是一道非常有意思的“反向重构”的DP题。 + +首先,从题境上来看这类似一道经典的背包问题。给出一系列硬币面值coins可以无限重复使用,问有多少种构造方法能拼出面值n来?对于这个问题,我们有如下动态规划解法。特别注意最外层的循环是硬币面额 +``` +for (int c: coins) + for (int i=c; i<=n; i++) + dp[i] += dp[i-c]; +``` +状态转移的思想就是,每轮引入一种新的硬币面额c:遍历所有面值i,考察最后一个硬币是否使用c。如果不使用,那么dp[i]依然是前一轮的数值;如果使用c,那么就取决于dp[i-c]。 + +我们可以沿用同样的思路,来思考numWays(也就是dp)是怎么计算出来的 +``` +for (int c: coins) { + if c 不存在 + continue; + else if c 存在 + for (int i=c; i<=n; i++) + dp[i] += dp[i-c]; +} +``` + +那么如何判断c是否存在呢?突破点就是面额c的构造方法。如果numWays[c]等于前一轮(还没有引入面额c的时候)的dp[c],那么就意味着面额c一定不存在。否则c的构造必然至少还能增加一种方法(即只使用一个c硬币)。反之,面额c必须存在,否则无法弥补这个差额。 + +当然,我们仅凭numWays[c]的分析来决定面额的种类还是比较片面的,它不能保证最终据此计算得到的dp都与所有的numsWays一致。所以我们还要再次校验一下。 diff --git a/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions/3654.Minimum-Sum-After-Divisible-Sum-Deletions.cpp b/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions/3654.Minimum-Sum-After-Divisible-Sum-Deletions.cpp new file mode 100644 index 000000000..1a4362f44 --- /dev/null +++ b/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions/3654.Minimum-Sum-After-Divisible-Sum-Deletions.cpp @@ -0,0 +1,23 @@ +using ll = long long; +class Solution { +public: + long long minArraySum(vector& nums, int k) { + int n = nums.size(); + nums.insert(nums.begin(), 0); + + vectordp(n+1,LLONG_MAX/4); + vectordp_by_r(k,LLONG_MAX/4); + dp[0] = 0; + dp_by_r[0] = 0; + + ll presum = 0; + for (int i=1; i<=n; i++) { + presum += nums[i]; + int r = presum % k; + dp[i] = min(dp[i-1] + nums[i], dp_by_r[r]); + dp_by_r[r] = min(dp_by_r[r], dp[i]); + } + + return dp[n]; + } +}; diff --git a/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions/Readme.md b/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions/Readme.md new file mode 100644 index 000000000..c743eef2f --- /dev/null +++ b/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions/Readme.md @@ -0,0 +1,9 @@ +### 3654.Minimum-Sum-After-Divisible-Sum-Deletions + +首先,题意中“after each deletion, the remaining elements close the gap”这个表述其实带有迷惑性。我们其实不需要这样的操作。假设有a,b,c,d四个位置,我们先删除[b,c],剩余合并之后再删除[a,d],那么必然等价于可以直接删除[a,d]。因为[b:c]能被k整除,且[a:b)+(c:d]能被k整除,则[a:d]一定也能被k整除。因此原题可以转化为,在nums里删除若干段互不相交的、能被k整除的区间,使得剩余的元素之和最小。 + +由此本题就具有了典型的“无后效性”。我们可以考虑动态规划。令dp[i]表示只处理前i个元素能够得到最优解,突破口就在于nums[i]的处理。 +1. 如果不删除nums[i],那么直接有`dp[i] = dp[i-1]+nums[i]`。 +2. 
如果删除nums[i],那么我们需要找到一个位置j,使得sum[j+1:i]能被k整除,则有转移方程`dp[i]=dp[j]`。满足条件“sum[j+1:i]能被k整除”的j可能有多处,它们明显有一个共同的特征,就是前缀和presum[j]必须与presum[i]关于k同余。于是我们可以将i之前的所有dp值按照`presum[j]%k`的余数分类,每种余数只记录最小的dp值,记作dp_by_r。假设`presum[i]%k==r`,那么我们就可以直接得到`dp_by_r[r]`,就是dp[i]的前驱状态。 + +最终返回dp[n]即可。 diff --git a/Dynamic_Programming/3661.Maximum-Walls-Destroyed-by-Robots/3661.Maximum-Walls-Destroyed-by-Robots.cpp b/Dynamic_Programming/3661.Maximum-Walls-Destroyed-by-Robots/3661.Maximum-Walls-Destroyed-by-Robots.cpp new file mode 100644 index 000000000..ded1f1cbf --- /dev/null +++ b/Dynamic_Programming/3661.Maximum-Walls-Destroyed-by-Robots/3661.Maximum-Walls-Destroyed-by-Robots.cpp @@ -0,0 +1,39 @@ +class Solution { + int dp[100005][2]; +public: + int maxWalls(vector& robots, vector& distance, vector& walls) { + int n = robots.size(); + vector>r; + for (int i=0; i>& grid) { + int M = 1e9+7; + int m = grid.size(), n = grid[0].size(); + for (int i=0; i=1 && j>=0) { + if (grid[i-1][j]==0) + dp[i][j][0] += dp[i-1][j][0]+dp[i-1][j][1]; + else + dp[i][j][0] += dp[i-1][j][1]; + } + if (i>=0 && j-1>=0) { + if (grid[i][j-1]==0) + dp[i][j][1] += dp[i][j-1][0]+dp[i][j-1][1]; + else + dp[i][j][1] += dp[i][j-1][0]; + } + dp[i][j][0]%=M; + dp[i][j][1]%=M; + } + return (dp[m-1][n-1][0]+dp[m-1][n-1][1])%M; + } +}; diff --git a/Dynamic_Programming/3665.Twisted-Mirror-Path-Count/Readme.md b/Dynamic_Programming/3665.Twisted-Mirror-Path-Count/Readme.md new file mode 100644 index 000000000..4769aca17 --- /dev/null +++ b/Dynamic_Programming/3665.Twisted-Mirror-Path-Count/Readme.md @@ -0,0 +1,11 @@ +### 3665.Twisted-Mirror-Path-Count + +考虑到对于镜子而言,左边入只能下边出,上边入只能右边出,所以我们需要在设计dp状态时考虑入射方向。令dp[i][j][d]表示以d方向进入(i,j)时的路径数量。令d=0表示向下,d=1表示向右。 + +考虑从(i-1,j)往下进入(i,j)。如果(i-1,j)是普通的格子,那么就有`dp[i][j][0]+=dp[i-1][j][0]+dp[i-1][j][1]`.如果(i-1,j)是镜子,那么只能是向右进入镜子的路径才能往下进入(i,j),即`dp[i][j][0]+=dp[i-1][j][1]`。 + +同理考虑从(i,j-1)往右进入(i,j),更新`dp[i][j][1]`. + +最终输出`dp[m-1][n-1][0]+`dp[m-1][n-1][1]`. 
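A condensed sketch of this two-direction DP (illustrative only; `grid[i][j] == 1` marks a mirror, and the committed file above is the reference):
```cpp
#include <vector>
using namespace std;

// dp[i][j][d]: number of ways to enter cell (i,j) moving down (d=0) or right (d=1).
// A mirror forces right-entering paths down and down-entering paths right.
int twistedPathsSketch(vector<vector<int>>& grid) {
    const long long M = 1e9 + 7;
    int m = grid.size(), n = grid[0].size();
    vector<vector<vector<long long>>> dp(m, vector<vector<long long>>(n, vector<long long>(2, 0)));
    dp[0][0][0] = 1;                                   // the starting direction is arbitrary
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++) {
            if (i == 0 && j == 0) continue;
            if (i >= 1) {                              // entering (i,j) downward from (i-1,j)
                if (grid[i-1][j] == 0) dp[i][j][0] = (dp[i-1][j][0] + dp[i-1][j][1]) % M;
                else                   dp[i][j][0] = dp[i-1][j][1] % M;   // only right-entering paths turn down
            }
            if (j >= 1) {                              // entering (i,j) rightward from (i,j-1)
                if (grid[i][j-1] == 0) dp[i][j][1] = (dp[i][j-1][0] + dp[i][j-1][1]) % M;
                else                   dp[i][j][1] = dp[i][j-1][0] % M;   // only down-entering paths turn right
            }
        }
    return (int)((dp[m-1][n-1][0] + dp[m-1][n-1][1]) % M);
}
```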
+ +注意初始条件(0,0),可以任意设置`dp[0][0][0]=1`或者`dp[0][0][1]=1`。 diff --git a/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_dp_v2.cpp b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_dp_v2.cpp new file mode 100644 index 000000000..9463455f2 --- /dev/null +++ b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_dp_v2.cpp @@ -0,0 +1,21 @@ +class Solution { + int dp[205][20005]; +public: + bool canPartition(vector& nums) + { + int sum = accumulate(nums.begin(), nums.end(), 0); + if (sum%2!=0) return false; + + int n = nums.size(); + nums.insert(nums.begin(), 0); + + dp[0][0] = 1; + for (int i=1; i<=n; i++) + for (int s = 0; s<=sum/2; s++) + { + dp[i][s] = dp[i-1][s] || (s>=nums[i] && dp[i-1][s-nums[i]]); + } + + return dp[n][sum/2]; + } +}; diff --git a/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_dp_v3.cpp b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_dp_v3.cpp new file mode 100644 index 000000000..0b03ab57a --- /dev/null +++ b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_dp_v3.cpp @@ -0,0 +1,25 @@ +class Solution { + int dp[205][20005]; +public: + bool canPartition(vector& nums) + { + int sum = accumulate(nums.begin(), nums.end(), 0); + if (sum%2!=0) return false; + + int n = nums.size(); + nums.insert(nums.begin(), 0); + + dp[0][0] = 1; + for (int i=0; i& nums) - { - int sum = accumulate(nums.begin(),nums.end(),0); - if (sum%2!=0) return false; - - unordered_setdp; - dp.insert(0); - - for (auto x: nums) - { - vectortemp; - for (auto s: dp) - { - if (s+x==sum/2) return true; - temp.push_back(s+x); - } - for (auto a: temp) - dp.insert(a); - } - return false; - } -}; diff --git a/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_v3.cpp b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_v3.cpp deleted file mode 100644 index dee14aaa3..000000000 --- a/Dynamic_Programming/416.Partition-Equal-Subset-Sum/416.Partition-Equal-Subset-Sum_v3.cpp +++ /dev/null @@ -1,22 +0,0 @@ -class Solution { -public: - bool canPartition(vector& nums) - { - int sum = accumulate(nums.begin(),nums.end(),0); - if (sum%2!=0) return false; - - vectordp(sum/2+1,0); - dp[0] = true; - - for (auto x: nums) - { - for (int s = sum/2; s>=0; s--) - { - if (dp[s]==false) continue; - if (s+x<=sum/2) - dp[s+x] = true; - } - } - return dp[sum/2]; - } -}; diff --git a/Dynamic_Programming/416.Partition-Equal-Subset-Sum/Readme.md b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/Readme.md index feedb5214..a715bc6fe 100644 --- a/Dynamic_Programming/416.Partition-Equal-Subset-Sum/Readme.md +++ b/Dynamic_Programming/416.Partition-Equal-Subset-Sum/Readme.md @@ -2,21 +2,21 @@ 本题是个NP问题。可以采用DFS的方法来暴力枚举,虽然可以利用各种剪枝优化的手段,但根本的时间复杂度仍然是o(2^N)。早期的时候DFS的版本是可以AC的,但是最近被TLE了。 -于是我们可以换一个“答案空间”去切入,那就是用背包问题的想法。DFS的解法其实是寻找在一个N维空间上搜索答案(1,0,0,....,0,1,1),其中0/1表示该数字我们是否选择。显然这个空间的候选数目的order达到了指数级别。“背包问题”就是改变解答空间,思考如果我们构建任意和为s的subset的话,是否能够实现目标。对于s而言,它的范围是从0到nums.size()*nums[i]=2e4. 这个空间是大大缩小的了。举个例子,假如dp[10]=true表示我们可以选择一部分数字加起来是10,那么我们试图思考,我们能否利用这部分数字再加上一些其他数字,使得总和是20呢?也就是说,我们能否通过dp[10]=true来帮助判断dp[20]=true呢? +于是我们可以换一个“答案空间”去切入,那就是用背包问题的想法。DFS的解法其实是寻找在一个N维空间上搜索答案(1,0,0,....,0,1,1),其中0/1表示该数字我们是否选择。显然这个空间的候选数目的order达到了指数级别。“背包问题”就是改变解答空间,思考如果我们构建任意和为s的subset的话,是否能够实现目标。对于s而言,它的范围是从0到`nums.size()*nums[i]=2e4`. 
这个空间是大大缩小的了。举个例子,假如dp[10]=true表示我们可以选择一部分数字加起来是10,那么我们试图思考,我们能否利用这部分数字再加上一些其他数字,使得总和是20呢?也就是说,我们能否通过dp[10]=true来帮助判断dp[20]=true呢? 这就是01背包问题的基本思想。如果dp的空间大小合理,那么我们就可以来解决之前DFS所无法处理的复杂度。基本的模板如下: -``` -for (auto x: nums) // 遍历物品 - for (auto s= 0 to sum/2) // 遍历容量 - if dp'[s-x] = true +```cpp +for (int i=0; i& nums, int S) { int sum = accumulate(nums.begin(), nums.end(), 0); if (S>sum || S<-sum) return false; - int offset = sum; - vectordp(2*offset+1); - dp[0+offset] = 1; + int n = nums.size(); + nums.insert(nums.begin(), 0); + + int offset = 1000; + dp[0][offset] = 1; - for (auto x: nums) - { - auto temp = dp; - for (int i=-offset; i<=offset; i++) + for (int i=1; i<=n; i++) + for (int s = -1000; s<=1000; s++) { - dp[i+offset] = 0; - if (i-x>=-offset) - dp[i+offset] += temp[i-x+offset]; - if (i+x<=offset) - dp[i+offset] += temp[i+x+offset]; - //cout<=-1000) + dp[i][s+offset] += dp[i-1][s-nums[i]+offset]; + + if (s+nums[i]<=1000 && s+nums[i]>=-1000) + dp[i][s+offset] += dp[i-1][s+nums[i]+offset]; } - } - return dp[S+offset]; + return dp[n][S+offset]; } }; diff --git a/Dynamic_Programming/518.Coin-Change-2/518.Coin-Change-2_v1.cpp b/Dynamic_Programming/518.Coin-Change-2/518.Coin-Change-2_v1.cpp index 3f3465781..043f12224 100644 --- a/Dynamic_Programming/518.Coin-Change-2/518.Coin-Change-2_v1.cpp +++ b/Dynamic_Programming/518.Coin-Change-2/518.Coin-Change-2_v1.cpp @@ -2,7 +2,7 @@ class Solution { public: int change(int amount, vector& coins) { - vectordp(amount+1,0); + vectordp(amount+1,0); dp[0] = 1; for (int i=0; i& coins) { - vectordp(amount+1,0); + vectordp(amount+1,0); dp[0] = 1; for (int i=0; i cheapestJump(vector& A, int B) { int n=A.size(); - vectordp(n,INT_MAX); + vectordp(N,INT_MAX/2); vectorpath(n,-1); dp[n-1]=A[n-1]; diff --git a/Dynamic_Programming/673.Number-of-Longest-Increasing-Subsequence/Readme.md b/Dynamic_Programming/673.Number-of-Longest-Increasing-Subsequence/Readme.md index 54901a38f..24603d8ea 100644 --- a/Dynamic_Programming/673.Number-of-Longest-Increasing-Subsequence/Readme.md +++ b/Dynamic_Programming/673.Number-of-Longest-Increasing-Subsequence/Readme.md @@ -6,7 +6,7 @@ ```len[i] = max (len[j]+1) for 0<=jN) continue; if (grid[i-1][j-1]==-1||grid[x-1][y-1]==-1) continue; - if (i==1&&j==1&&x==1) - { - dp[i][j][x] = grid[0][0]; - continue; - } + if (i==1&&j==1&&x==1) continue; dp[i][j][x] = max(dp[i][j][x], dp[i-1][j][x-1]); dp[i][j][x] = max(dp[i][j][x], dp[i][j-1][x-1]); diff --git a/Dynamic_Programming/879.Profitable-Schemes/879.Profitable-Schemes.cpp b/Dynamic_Programming/879.Profitable-Schemes/879.Profitable-Schemes.cpp index 61bdc8247..92a2bd443 100644 --- a/Dynamic_Programming/879.Profitable-Schemes/879.Profitable-Schemes.cpp +++ b/Dynamic_Programming/879.Profitable-Schemes/879.Profitable-Schemes.cpp @@ -1,40 +1,34 @@ class Solution { + int dp[105][105][105]; + int M = 1e9+7; public: - int profitableSchemes(int G, int P, vector& group, vector& profit) + int profitableSchemes(int n, int minProfit, vector& group, vector& profit) { - auto dp = vector>(G+1, vector(P+1,0)); - int M = 1e9+7; + dp[0][0][0] = 1; + + int m = group.size(); + group.insert(group.begin(), 0); + profit.insert(profit.begin(), 0); - dp[0][0] = 1; - - auto dp_new = dp; - - for (int k=0; k>& edges, int distanceThreshold) + { + int dp[n][n]; + for (int i=0; i& edges) + { + int n = edges.size(); + vectorindegree(n); + for (int i=0; ivisited(n); + queueq; + for (int i=0; inext[100005]; +public: + bool isPossible(int n, vector>& edges) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + 
next[a].insert(b); + next[b].insert(a); + } + + vectorodds; + for (int i=1; i<=n; i++) + { + if ((next[i].size()) % 2 == 1) + odds.push_back(i); + } + + if (odds.size()==0) return true; + + if (odds.size()==2) + { + int a = odds[0], b= odds[1]; + if (next[a].find(b)==next[a].end()) + return true; + else for (int i=1; i<=n; i++) + { + if (i==a || i==b) continue; + if (next[i].find(a)==next[i].end() && next[i].find(b)==next[i].end() ) + return true; + } + + return false; + } + + if (odds.size() == 4) + { + int a = odds[0], b= odds[1], c= odds[2], d=odds[3]; + if (next[a].find(b)==next[a].end() && next[c].find(d)==next[c].end()) + return true; + if (next[a].find(c)==next[a].end() && next[b].find(d)==next[b].end()) + return true; + if (next[a].find(d)==next[a].end() && next[b].find(c)==next[b].end()) + return true; + return false; + } + + return false; + } +}; diff --git a/Graph/2508.Add-Edges-to-Make-Degrees-of-All-Nodes-Even/Readme.md b/Graph/2508.Add-Edges-to-Make-Degrees-of-All-Nodes-Even/Readme.md new file mode 100644 index 000000000..155a303f7 --- /dev/null +++ b/Graph/2508.Add-Edges-to-Make-Degrees-of-All-Nodes-Even/Readme.md @@ -0,0 +1,13 @@ +### 2508.Add-Edges-to-Make-Degrees-of-All-Nodes-Even + +本题没有特别的,就是考虑对图的几何分析。令度为奇数的点的数目为m,分情况讨论: + +如果m等于1,那么任何新连接到m的边,都会破坏另一个节点度的偶性。 + +如果m等于2,那么(1) 如果这两点之间没有边,那么就一条边将其相邻即可。(2) 如果这两点a,b之间已经有边,那么我们需要令找一个(度为偶数)的点c,且该点ac和bc都能再连一条边。 + +如果m等于3,我们无法用两条边,仅改变这三个点的度的奇性。 + +如果m等于4,我们令其为a,b,c,d。只有ab、cd可连边,或者ac、bd可连边,或者ad、bc可连边,三种情况能符合要求。 + +如果m大于等于5,我们无法用两条边连到5个或更多的点(改变它们的度),返回false。 diff --git a/Graph/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip.cpp b/Graph/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip.cpp new file mode 100644 index 000000000..5788b4d30 --- /dev/null +++ b/Graph/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip.cpp @@ -0,0 +1,35 @@ +class Solution { +public: + bool isPossibleToCutPath(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + for (int i=m-1; i>=0; i--) + for (int j=n-1; j>=0; j--) + { + if (i==m-1 && j==n-1) continue; + if ((i+1>=m || grid[i+1][j]==0) && (j+1>=n || grid[i][j+1]==0)) + grid[i][j] = 0; + } + if (grid[0][0]==0) return true; + + int x1=0, y1=0, x2=0, y2=0; + for (int k=0; k& coins, vector>& edges) + { + int n = coins.size(); + vector>next(n); + + vectordegree(n); + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].insert(b); + next[b].insert(a); + degree[a]++; + degree[b]++; + } + + vectordeleted(n); + queueq; + for (int i=0; idepth(n, -1); + for (int i=0; i=3); + + if (ret >= 1) + return (ret-1)*2; + else + return 0; + } +}; diff --git a/Graph/2603.Collect-Coins-in-a-Tree/Readme.md b/Graph/2603.Collect-Coins-in-a-Tree/Readme.md new file mode 100644 index 000000000..94a316e9a --- /dev/null +++ b/Graph/2603.Collect-Coins-in-a-Tree/Readme.md @@ -0,0 +1,9 @@ +### 2603.Collect-Coins-in-a-Tree + +首先,对于那些处于端点位置的非coin节点、及全部由非coin节点组成支链,我们注定是不会去理会的。所以我们可以第一步进行“剪枝”,用拓扑排序的方法,从度为1的非coin节点开始,一层一层往内圈剥洋葱,将这些多余的分支砍去。剩下的图形,叶子节点必然都是coin;当然也可能存在一些非coin的节点,但它们都位于去往其他coin节点的必经之路上,我们也必须去理会。 + +接下来考虑考虑题目中说,Collect all the coins that are at a distance of at most 2 from the current vertex. 这就意味着我们不必走到每个端点去收集coin,只要走到端点之前两步的位置就可以收集。所以我们可以进一步将这些不用到达的节点都砍去。这里我们同样可以用拓扑排序的方法,从度为1的节点开始,一层一层往内剥洋葱,从小到大来标记每个节点的深度。这里的深度的定义就是,从该点到它的所有的子孙节点里的最大距离。举个例子,假设A->B,A->C->D->E,其中B和E都是端点,那么A的深度就是4. 
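The depth labelling described above can be sketched as a second peeling pass (hedged: the names `adj`, `deg` and `alive` are assumed to come out of the first pruning pass, and leaves are given depth 1 so that A gets depth 4 in the example; the committed file above is the reference):
```cpp
#include <queue>
#include <vector>
#include <algorithm>
using namespace std;

vector<int> labelDepth(int n, vector<vector<int>>& adj, vector<int>& deg, vector<int>& alive) {
    vector<int> depth(n, 0);
    queue<int> q;
    for (int i = 0; i < n; i++)
        if (alive[i] && deg[i] == 1) { depth[i] = 1; q.push(i); }   // remaining leaves carry coins
    while (!q.empty()) {
        int u = q.front(); q.pop();
        alive[u] = 0;
        for (int v : adj[u]) {
            if (!alive[v]) continue;
            depth[v] = max(depth[v], depth[u] + 1);    // farthest already-peeled node below v
            if (--deg[v] == 1) q.push(v);
        }
    }
    return depth;      // nodes with depth >= 3 are the m vertices that must be visited
}
```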
+ +通过拓扑排序标记了所有节点从外圈到内圈的深度之后,我们发现,深度大于等于3的节点是我们必须访问的。而深度小于3的节点我们不需要访问,只需要走到深度等于3的节点就能收集到端点处的coin(如果有的话)。假设深度大于等于3的节点的个数有m个,因为这m个点必然是联通的,所以对应有m-1条边。我们注意到,起点和终点必须在同一处,这就意味着无论如何每条边我们必须走两次(一来一回),所以最终的答案就是`2(m-1)`,起点选在这m个节点的任意一个都可。 + +特别注意,如果m等于0,直接返回0. diff --git a/Graph/2608.Shortest-Cycle-in-a-Graph/2608.Shortest-Cycle-in-a-Graph.cpp b/Graph/2608.Shortest-Cycle-in-a-Graph/2608.Shortest-Cycle-in-a-Graph.cpp new file mode 100644 index 000000000..4bffafa3c --- /dev/null +++ b/Graph/2608.Shortest-Cycle-in-a-Graph/2608.Shortest-Cycle-in-a-Graph.cpp @@ -0,0 +1,57 @@ +class Solution { + unordered_setnext[1005]; + int n; +public: + int findShortestCycle(int n, vector>& edges) + { + this->n = n; + for (auto&edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].insert(b); + next[b].insert(a); + } + + int ret = INT_MAX; + for (auto&edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].erase(b); + next[b].erase(a); + ret = min(ret, BFS(a,b)); + next[a].insert(b); + next[b].insert(a); + } + + if (ret==INT_MAX) return -1; + return ret+1; + } + + int BFS(int start, int end) + { + vectorvisited(n); + queueq; + q.push(start); + visited[start] = 1; + + int step = 0; + while (!q.empty()) + { + int len = q.size(); + while (len--) + { + int cur = q.front(); + q.pop(); + if (cur==end) return step; + for (int nxt: next[cur]) + { + if (visited[nxt]) continue; + q.push(nxt); + visited[nxt] = 1; + } + } + step++; + } + return INT_MAX; + } +}; diff --git a/Graph/2608.Shortest-Cycle-in-a-Graph/Readme.md b/Graph/2608.Shortest-Cycle-in-a-Graph/Readme.md new file mode 100644 index 000000000..5ed018960 --- /dev/null +++ b/Graph/2608.Shortest-Cycle-in-a-Graph/Readme.md @@ -0,0 +1,3 @@ +### 2608.Shortest-Cycle-in-a-Graph + +这是图论里的经典问题。解法非常简单,就是遍历所有的边`a-b`。查看如果将该边断开,从a到b的最短距离d,那么d+1就是就包含d的最短环。然后取全局的最小值即可。 diff --git a/Graph/2642.Design-Graph-With-Shortest-Path-Calculator/2642.Design-Graph-With-Shortest-Path-Calculator.cpp b/Graph/2642.Design-Graph-With-Shortest-Path-Calculator/2642.Design-Graph-With-Shortest-Path-Calculator.cpp new file mode 100644 index 000000000..04cfb1a13 --- /dev/null +++ b/Graph/2642.Design-Graph-With-Shortest-Path-Calculator/2642.Design-Graph-With-Shortest-Path-Calculator.cpp @@ -0,0 +1,44 @@ +class Graph { + int n; + int dp[100][100]; +public: + Graph(int n, vector>& edges) { + this->n = n; + for (int i=0; i edge) + { + int a = edge[0], b = edge[1]; + for (int i=0; iaddEdge(edge); + * int param_2 = obj->shortestPath(node1,node2); + */ diff --git a/Graph/2642.Design-Graph-With-Shortest-Path-Calculator/Readme.md b/Graph/2642.Design-Graph-With-Shortest-Path-Calculator/Readme.md new file mode 100644 index 000000000..a82e45572 --- /dev/null +++ b/Graph/2642.Design-Graph-With-Shortest-Path-Calculator/Readme.md @@ -0,0 +1,17 @@ +### 2642.Design-Graph-With-Shortest-Path-Calculator + +根据题意,我们要时刻准备返回任意两点之间的最短路径,因此Dijkstra算法是不行的。除此之外,想求任意两点之间的最短路径,最经典的算法就是Floyd算法了,而o(N^3)的时间复杂度也是可以接受的。所以我们用Floyd预处理这个图,代码非常优雅 +```cpp + for (int k = 0; k < n; k++) { + for (int i = 0; i < n; i++) { + for (int j = 0; j < n; j++) { + dp[i][j] = Math.min(dp[i][j], dp[i][k] + dp[k][j]); + } + } + } +``` +特别注意k必须放在最外层。从形式上看,本质上这就是一个动态规划。 + +当我们新增一条从a->b的edge时,会对已有网络的最短路径产生什么影响呢?很显然,dp[i][j]无非就两种情况:经过edge,不经过edge。对于前者,我们只需要考察`dp[i][a]+edge+dp[b][j]`;对于后者,依然还是`dp[i][j]`。两者取小,就是更新后的dp[i][j].所以我们能用N^2的时间更新所有的`dp[i][j]`,这也是符合数据量的。 + +综上,我们可以实时输出`dp[i][j]`表示两点之间的最短距离。 diff --git a/Graph/2699.Modify-Graph-Edge-Weights/2699.Modify-Graph-Edge-Weights.cpp b/Graph/2699.Modify-Graph-Edge-Weights/2699.Modify-Graph-Edge-Weights.cpp new file 
mode 100644 index 000000000..c9a2d2947 --- /dev/null +++ b/Graph/2699.Modify-Graph-Edge-Weights/2699.Modify-Graph-Edge-Weights.cpp @@ -0,0 +1,69 @@ +using PII = pair; +class Solution { + unordered_map next[105]; + int todo[105][105]; +public: + vector> modifiedGraphEdges(int n, vector>& edges, int source, int destination, int target) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1], c=edge[2]; + if (c==-1) + { + c = 1; + todo[a][b] = 1; + todo[b][a] = 1; + } + next[a][b] = c; + next[b][a] = c; + } + + priority_queue, greater<>>pq; + vectordist1(n, INT_MAX/3); + + pq.push({0, destination}); + while (!pq.empty()) + { + auto [d, cur] = pq.top(); + pq.pop(); + if (dist1[cur]!=INT_MAX/3) continue; + dist1[cur] = d; + for (auto [nxt, weight]: next[cur]) + { + if (dist1[nxt]!=INT_MAX/3) continue; + pq.push({d+weight, nxt}); + } + } + + + vectordist(n, INT_MAX/3); + pq.push({0, source}); + while (!pq.empty()) + { + auto [d, cur] = pq.top(); + pq.pop(); + if (dist[cur]!=INT_MAX/3) continue; + dist[cur] = d; + if (cur==destination && d != target) return {}; + for (auto [nxt, weight]: next[cur]) + { + if (dist[nxt]!=INT_MAX/3) continue; + if (todo[cur][nxt]==1 && dist[cur]+weight+dist1[nxt] < target) + { + weight = target-dist[cur]-dist1[nxt]; + next[cur][nxt] = weight; + next[nxt][cur] = weight; + } + pq.push({d+weight, nxt}); + } + } + + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + edge[2] = next[a][b]; + } + + return edges; + } +}; diff --git a/Graph/2699.Modify-Graph-Edge-Weights/Readme.md b/Graph/2699.Modify-Graph-Edge-Weights/Readme.md new file mode 100644 index 000000000..aa7ec6d09 --- /dev/null +++ b/Graph/2699.Modify-Graph-Edge-Weights/Readme.md @@ -0,0 +1,15 @@ +### 2699.Modify-Graph-Edge-Weights + +因为最终修正边权之后的图里要求所有的边都是正数,所以我们第一步肯定先将所有能修改的边从-1改为为最小的正数值1放入图中。 + +最暴力的思想就是不停地跑Dijkstra求起点到终点的最短距离。如果当前的最短距离已经大于target,那么无解。如果当前的最短距离就是target,那么我们就不需要改动。如果当前的最短距离小于target,且最短距离里不包括任何可修改的边,那么也是无解。剩下的情况就是最短距离小于target,且其中包含了至少一条可修改的边,那么我们可以贪心地将该边权调大,使得路径恰为target。这样我们就消灭了一条小于target的路径。然后重复以上的过程。这样的算法可能会跑o(E)遍的Dijkstra,会TLE。 + +我们再审视一下我们的Dijkstra算法。注意当我们每次从PQ里弹出一个已经确定最短距离的的点,会尝试通过其邻接的边将一个新点加入PQ,如果我们所用到的所有的边都是不可修改的,那么我们弹出的点及其最短路径也都是不可修改的。但是当我们需要用到一条可修改的边时,比如说已知从起点到a的最短路径,然后a与b有一条可修改的边,此时我们在将b加入PQ时就会有所顾虑。如果“起点到a的最短距离”+“ab之间的边权1”+“b到终点的最短距离”小于target的话,那么我们就违反了题意。所以我们可以贪心地更改这条可修改边,使得三段距离之和变成target。这就意味着我们需要提前计算“b到终点的最短距离”。这样,当b收录进入PQ的时候,我们就保证了这条到达b的路径,不会造成任何“起点到终点的最短路径小于target”,我们可以放心地加入PQ共后续使用。 + +所以依据上面的算法,可以在一次的Dijkstra的过程中不断地贪心地设置可修改边的边权。知道我们发现终点从PQ里弹出时,意味着我们已经确定了起点到终点的最短距离。如果这个距离不为target,那么就是无解。 + +=========== + +Q: 当边P-Q为可编辑边时,则需考虑`dist[S-P] + weight(P, Q) + dist1[Q-D] < target`,但为何我们能够笃定dist1[Q-D]未经过任何我们已经修改过的可编辑边呢? 
因为如果dist1[Q-D]有经过已修改过的可编辑边,现阶段的dist1[Q-D]其实已经比当时纪录的还大了,那上面的条件式可能会给出错误的判定结果。 + +A: 假设如你所说,当从优先队列弹出P点时,在Q到D的最短路径有一条已经修改过的可编辑边,假设为AB。既然AB已经修改过,那么AB必然是从起点S到某个点(假设是C)的最短距离(已经早于P点从优先队列里处理过)的一部分。于是即存在这样一条路径S-A-B-C,它是短于S-P的(这是因为Dijkstra算法会会按从小到大输出各个点的最短路径)。OK,既然 S-A-B-C>next[100005]; + unordered_mapcount; + LL ret = 0; +public: + long long countPalindromePaths(vector& parent, string s) + { + int n = parent.size(); + for (int i=1; i countVisitedNodes(vector& edges) + { + int n = edges.size(); + vectorrets(n); + + for (int i=0; iq; + for (int i=0; i&rets) + { + if (rets[cur]!=0) + return rets[cur]; + + rets[cur] = dfs(next[cur], rets) + 1; + return rets[cur]; + } +}; diff --git a/Graph/2876.Count-Visited-Nodes-in-a-Directed-Graph/Readme.md b/Graph/2876.Count-Visited-Nodes-in-a-Directed-Graph/Readme.md new file mode 100644 index 000000000..ed8ebc279 --- /dev/null +++ b/Graph/2876.Count-Visited-Nodes-in-a-Directed-Graph/Readme.md @@ -0,0 +1,7 @@ +### 2876.Count-Visited-Nodes-in-a-Directed-Graph + +对于任何有向图而言,顺着边的方向走向去,只有两种归宿:要么进入死胡同,要么进入循环圈。所以你可以把有向图简单地认为就是若干个单链并入一个环。 + +我们先找出入度为零的节点,然后用拓扑排序的方法将所有单链上的节点排除掉。剩下的就是环上的节点。从环的任意节点出发,可以遍历整个环得到环的长度(也就是对于这些节点的答案)。 + +最后再遍历单链节点,dfs直至遇到环的入口,这段距离加上环的长度,就是对于这些节点的答案。 diff --git a/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches/2959.Number-of-Possible-Sets-of-Closing-Branches.cpp b/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches/2959.Number-of-Possible-Sets-of-Closing-Branches.cpp new file mode 100644 index 000000000..2665fb090 --- /dev/null +++ b/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches/2959.Number-of-Possible-Sets-of-Closing-Branches.cpp @@ -0,0 +1,53 @@ +class Solution { +public: + int numberOfSets(int n, int maxDistance, vector>& roads) + { + int ret = 0; + for (int state=0; state<(1<>d(n, vector(n, INT_MAX/3)); + for (int i=0; i>i)&1)==0) continue; + d[i][i] = 0; + } + + for (auto road: roads) + { + int a = road[0], b = road[1], w = road[2]; + if (((state>>a)&1)==0) continue; + if (((state>>b)&1)==0) continue; + + for (int i=0; i>i)&1)==0) continue; + for (int j=0; j>j)&1)==0) continue; + d[i][j] = min(d[i][j], d[i][a]+w+d[b][j]); + d[i][j] = min(d[i][j], d[i][b]+w+d[a][j]); + } + } + } + + int flag = 1; + for (int i=0; i>i)&1)==0) continue; + for (int j=0; j>j)&1)==0) continue; + if (d[i][j]>maxDistance) + { + flag = 0; + break; + } + } + if (flag==0) break; + } + if (flag) ret++; + } + + return ret; + } +}; diff --git a/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches/Readme.md b/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches/Readme.md new file mode 100644 index 000000000..fd826991c --- /dev/null +++ b/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches/Readme.md @@ -0,0 +1,15 @@ +### 2959.Number-of-Possible-Sets-of-Closing-Branches + +因为节点数目n只有10,所以我们可以暴力枚举所有的closure方案,只需要2^n不超过1024种。 + +对于每种closure的方案,我们可以用类似Floy算法的n^3的时间度算出任意两点间的最短距离(排除掉closed point),然后只需要检查是否都小于targetDistance即可。 + +Floyd松弛算法如下: +```cpp +for road : roads + int a = road[0], b = road[1], w = road[2]; + for (int i=0; i& original, vector& changed, vector& cost) + { + for (int i=0; i<26; i++) + for (int j=0; j<26; j++) + { + if (i!=j) + d[i][j] = LLONG_MAX/3; + else + d[i][j] = 0; + } + + + for (int i=0; i countOfPairs(int n, int x, int y) + { + if (x>y) return countOfPairs(n, y, x); + this->n = n; + + vectorrets; + + if (abs(x-y)<=1) + { + for (int t=1; t<=n; t++) + rets.push_back((n-t)*2); + return rets; + } + + f1(x-1); + f1(n-y); + + cout<<"OK"<=t) count[t] += 1; + } + } + + void f4(LL d) + { + for (int t=1; t<=n; t++) + { + if (t < d-t) + 
count[t] += d*2; + else if (t == d-t) + count[t] += d; + } + } +}; diff --git a/Graph/3017.Count-the-Number-of-Houses-at-a-Certain-Distance-II/Readme.md b/Graph/3017.Count-the-Number-of-Houses-at-a-Certain-Distance-II/Readme.md new file mode 100644 index 000000000..09dc30780 --- /dev/null +++ b/Graph/3017.Count-the-Number-of-Houses-at-a-Certain-Distance-II/Readme.md @@ -0,0 +1,67 @@ +### 3017.Count-the-Number-of-Houses-at-a-Certain-Distance-II + +我们画一个示意图,将图划分为ABC三个区域,其中[x,y]部分为C。 +``` +A-A-A-C(x)-C------C-C(y)-B-B-B + |______________| +``` + +任意两个房子之间的最短距离,可以落入如下六个分类之中, AA,BB,AC,BC,AB,CC. 比如AC表示其中一个放在位于A区,另一个房子位于C区。我们分类讨论。 + +1. AA:对于长度为a个房子的简单串联,里面有多少对距离为t的配对呢?我们记做`helper0(a)` + +对于一个合法配对,将第一个房子记做i,则另一个房子记做i+t,那么要求 +``` +i>=1 +i+t<=a +``` +得到i的范围是[1, a-t]. 故对于距离t,我们可以增加`a-t`个配对(暂时不计首尾互换的重复路径) + +2. BB,计算方法同AA。 + +3. AC:这部分是由一个长度为a的长链,加上一个长度为d的圆环。里面有多少对距离为t的配对呢? + +显然,对于处于圆环上的点,为了与A实现最短距离,我们会根据它们离圆环入口x的位置,平均拆分成两半。这样就行程了三叉的形状:一条单链长度是a+1,然后接着两条支链,长度分别是d/2和(d-1)/2. + +对于在单链上的任意一点i,与长度为b的支链上的任意一点(不包括x点)能组成合法配对的条件是 +``` +i>=1 +i+t>=a+2 +i<=a +i+t<=a+1+b +``` +得到i的范围是[max(1,a+2-t), min(a,a+b+1-t)]. 由此可以计算出有多少个配对。 + +同理,可以计算单链上的任意一点,与长度为c的另一条支链上的任意一点(不包括x点)能组成的合法配对。 + +此外,我们需要单独出计算单链上的任意一点i,到x点能组成的合法配对。单独计算这个是为了避免在处理两条支链时重复计算。 +``` +i>=1 +i+t==a+1 +``` +即需要满足t<=a时,可以增加一个配对。 + +4. BC,计算方法同AC + +5. AB,计算方法类似AA。假设A的部分长度是a,B的部分长度是b,中间间隔了2(因为x和y相连)。里面有多少对距离为t的配对呢?我们记做`helper2(a)` + +对于在A上的任意一点i,与B上的任意一点能组成合法配对的条件是 +``` +i>=1 +i<=a +i+t>=a+3 +i+t<=a+2+b +``` +得到i的范围是[max(1,a+3-t), min(a,a+b+2-t)]. 由此可以计算出有多少个配对。 + +6. CC,此部分是一个长度为d的圆环,问里面有多少个长度为t的配对? + +对于圆环上任意一点i,顺时针走t步到达i+t的位置。这两个位置要形成一个有效配对,此时要保证它们的逆时针路径要小于t。即 +``` +t < d-t +``` +对于满足这个要求的t,那么圆环上的任意一点都是可以合法配对的起点,故可以增加d个配对。比如说d=4,那么当t=1时的四个配对是[1,2],[2,3],[3,4],[4,1]. + +此时有一个特别需要注意的地方,当`2*t==d`时,虽然也可以增加d个配对,但是这d个配对里,已经包含了首尾颠倒的重复路径。比如说d=4,那么当t=2时的四个配对是[1,3],[2,4],[3,1],[4,2],可以其中包含了重复的路径。而我们之前所有情况的讨论,所计算的配对都是单向的(编号小的在前,编号大的在后),都是需要乘以2的。唯独这个情况下,我们不能再乘以2. 
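A small sketch of this CC (pure cycle) case, mirroring the helper `f4` in the file above: when `2*t == d` the d pairs already contain both orientations, so they are added once and must not be doubled again.
```cpp
#include <vector>
using namespace std;
using LL = long long;

// On a cycle of length d, each of the d vertices starts one clockwise pair of length t,
// and that pair is a shortest path only while t < d - t.
void addCyclePairs(LL d, LL n, vector<LL>& count) {
    for (LL t = 1; t <= n; t++) {
        if (t < d - t)       count[t] += d * 2;   // d pairs, both orientations counted
        else if (t == d - t) count[t] += d;       // the d pairs already include both orientations
    }
}
```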
+ +将以上六种情况的计数全部加起来就是最终答案。 diff --git a/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes/3112.Minimum-Time-to-Visit-Disappearing-Nodes.cpp b/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes/3112.Minimum-Time-to-Visit-Disappearing-Nodes.cpp new file mode 100644 index 000000000..e256c94f3 --- /dev/null +++ b/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes/3112.Minimum-Time-to-Visit-Disappearing-Nodes.cpp @@ -0,0 +1,35 @@ +using PII = pair; +class Solution { + vectornext[50005]; +public: + vector minimumTime(int n, vector>& edges, vector& disappear) + { + vectorrets(n, -1); + + for (auto& edge: edges) + { + int a = edge[0], b = edge[1], w = edge[2]; + next[a].push_back({b,w}); + next[b].push_back({a,w}); + } + + priority_queue, greater<>>pq; + pq.push({0, 0}); + while (!pq.empty()) + { + auto [dist, cur] = pq.top(); + pq.pop(); + if (rets[cur]!=-1) continue; + rets[cur] = dist; + + for (auto [nxt, len]: next[cur]) + { + if (rets[nxt]!=-1) continue; + if (dist + len >= disappear[nxt]) continue; + pq.push({dist + len, nxt}); + } + } + + return rets; + } +}; diff --git a/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes/Readme.md b/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes/Readme.md new file mode 100644 index 000000000..e5b0c07c6 --- /dev/null +++ b/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes/Readme.md @@ -0,0 +1,5 @@ +### 3112.Minimum-Time-to-Visit-Disappearing-Nodes + +非常典型的单源最短路径问题,使用Dijkstra算法毋庸置疑。 + +我们只需要在Dikstra更新每个节点的最短时间时,判断一下此时的最短时间和该点disappear的时间。如果时间disappear更早,那么说明这个点无法出现在任何路径上,将其略过不加入Dijkstra的后续计算。 diff --git a/Graph/3123.Find-Edges-in-Shortest-Paths/3123.Find-Edges-in-Shortest-Paths.cpp b/Graph/3123.Find-Edges-in-Shortest-Paths/3123.Find-Edges-in-Shortest-Paths.cpp new file mode 100644 index 000000000..c1b10ddd1 --- /dev/null +++ b/Graph/3123.Find-Edges-in-Shortest-Paths/3123.Find-Edges-in-Shortest-Paths.cpp @@ -0,0 +1,62 @@ +using PII = pair; + +class Solution { + vectornext[50005]; +public: + vector findAnswer(int n, vector>& edges) + { + int m = edges.size(); + vectorrets(m, false); + + for (auto& edge : edges) + { + int a = edge[0], b = edge[1], w = edge[2]; + next[a].push_back({b,w}); + next[b].push_back({a,w}); + } + + priority_queue, greater<>>pq; + + pq.push({0, 0}); + vectord1(n, INT_MAX/3); + while (!pq.empty()) + { + auto [dist, cur] = pq.top(); + pq.pop(); + if (d1[cur]!= INT_MAX/3) continue; + d1[cur] = dist; + + for (auto [nxt, len]: next[cur]) + { + if (d1[nxt]!= INT_MAX/3) continue; + pq.push({dist + len, nxt}); + } + } + + pq.push({0, n-1}); + vectord2(n, INT_MAX/3); + while (!pq.empty()) + { + auto [dist, cur] = pq.top(); + pq.pop(); + if (d2[cur]!= INT_MAX/3) continue; + d2[cur] = dist; + + for (auto [nxt, len]: next[cur]) + { + if (d2[nxt]!= INT_MAX/3) continue; + pq.push({dist + len, nxt}); + } + } + + for (int i=0; iname2idx; +public: + double maxAmount(string initialCurrency, vector>& pairs1, vector& rates1, vector>& pairs2, vector& rates2) + { + unordered_setSet; + for (auto pair: pairs1) + { + Set.insert(pair[0]); + Set.insert(pair[1]); + } + for (auto pair: pairs2) + { + Set.insert(pair[0]); + Set.insert(pair[1]); + } + int idx = 0; + for (string s: Set) + name2idx[s] = idx++; + + + int n = name2idx.size(); + vector> dist1 = floyd(pairs1, rates1); + vector> dist2 = floyd(pairs2, rates2); + + int s = name2idx[initialCurrency]; + double ret = 1.0; + for (int i=0; i> floyd(vector>& pairs, vector& rates) + { + int n = name2idx.size(); + vector>dist(n, vector(n,0)); + for (int i=0; ib的路径长度(汇率)是t的话,必然有b->a的路径长度是1/t,别忘了将其也加入图优化的松弛过程。 
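As a hedged, self-contained sketch of the relaxation described above (function and variable names are illustrative; currencies are assumed to have already been mapped to indices 0..n-1 via name2idx):

```cpp
#include <vector>
#include <utility>
#include <algorithm>
using namespace std;

// Sketch: Floyd-style relaxation that maximizes the product of exchange rates.
// Each edge (a,b) with rate t also seeds the reverse direction b->a with rate 1/t.
vector<vector<double>> bestRates(int n, const vector<pair<int,int>>& edges, const vector<double>& rates)
{
    vector<vector<double>> d(n, vector<double>(n, 0.0));
    for (int i = 0; i < n; i++)
        d[i][i] = 1.0;                                   // a currency converts to itself at rate 1
    for (int i = 0; i < (int)edges.size(); i++)
    {
        auto [a, b] = edges[i];
        d[a][b] = max(d[a][b], rates[i]);                // forward rate t
        d[b][a] = max(d[b][a], 1.0 / rates[i]);          // reverse rate 1/t, easy to forget
    }
    for (int k = 0; k < n; k++)                          // relax through every intermediate currency
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
                d[i][j] = max(d[i][j], d[i][k] * d[k][j]);
    return d;
}
```

Running this once per rate table gives all-pairs best conversion rates in O(n^3), which is negligible for the input sizes here.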
diff --git a/Greedy/031.Next-Permutation/031.Next-Permutation.cpp b/Greedy/031.Next-Permutation/031.Next-Permutation.cpp new file mode 100644 index 000000000..c3752dcd9 --- /dev/null +++ b/Greedy/031.Next-Permutation/031.Next-Permutation.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + void nextPermutation(vector& nums) + { + int i = nums.size()-1; + while (i>=1 && nums[i]<=nums[i-1]) + i--; + + if (i==0) + { + sort(nums.begin(), nums.end()); + return; + } + + i--; + + int j = nums.size()-1; + while (nums[j]<=nums[i]) + j--; + swap(nums[i], nums[j]); + sort(nums.begin()+i+1, nums.end()); + return; + } +}; diff --git a/Greedy/031.Next-Permutation/Readme.md b/Greedy/031.Next-Permutation/Readme.md new file mode 100644 index 000000000..250ef1182 --- /dev/null +++ b/Greedy/031.Next-Permutation/Readme.md @@ -0,0 +1,7 @@ +### 031.Next-Permutation + +首先,如果已经是完全降序的序列,它是没有next permuation的。此时输出重新按升序排列的数组。 + +接下来,我们从后往前遍历,当第一次出现`nums[i]arr) @@ -31,7 +31,6 @@ class Solution { for (int i = 0; i < arr.size(); i++) { max_ending_here = max_ending_here + arr[i]; - max_ending_here %= M; if (max_ending_here < 0) max_ending_here = 0; if (max_so_far < max_ending_here) diff --git a/Greedy/1402.Reducing-Dishes/Readme.md b/Greedy/1402.Reducing-Dishes/Readme.md index 020065bf9..31e5ede88 100644 --- a/Greedy/1402.Reducing-Dishes/Readme.md +++ b/Greedy/1402.Reducing-Dishes/Readme.md @@ -1,7 +1,7 @@ ### 1402.Reducing-Dishes -两个突破点。首先我们肯定会把dishes按照满意度排序,满意度高的肯定放在后面做。其次,我们肯定会取若干个满意度最高的,关键就是取多少个而已。 +两个突破点。首先我们肯定会把dishes按照满意度排序,满意度高的肯定放在后面做。其次,在选取相同数目的dishe的前提下,我们肯定会取满意度最高的若干个。于是答案的本质就是选多少dishes。 -于是我们就是要尝试取最高的1个,或者2个,或者3个,...,直至n个满意度最高的dishes,计算最后总得分,取最大值。 +于是我们就是要尝试取最高的1个,或者2个,或者3个...,直至n个满意度最高的dishes,计算最后总得分,取最大值。 -根据计算公式,显然每增加一道菜(按照满意度从高到底是第i个),总得分total就会增加的分值就是前i个菜的presum。 +这里有个化简计算的方法。我们将dishe按照满意度从高到低排列。如果取一个,答案是```satisfaction[0]```;如果取两个,答案是```2*satisfaction[0] + satisfaction[1]```;如果取三个,答案是```3*satisfaction[0] + 2*satisfaction[1] + satisfaction[2]```。可见答案每次都在前者的基础上增加```presum[i]```,其中presum是satisfaction的前缀和。 diff --git a/Greedy/1536.Minimum-Swaps-to-Arrange-a-Binary-Grid/Readme.md b/Greedy/1536.Minimum-Swaps-to-Arrange-a-Binary-Grid/Readme.md index 22c6e6bbe..c4f323fb0 100644 --- a/Greedy/1536.Minimum-Swaps-to-Arrange-a-Binary-Grid/Readme.md +++ b/Greedy/1536.Minimum-Swaps-to-Arrange-a-Binary-Grid/Readme.md @@ -2,7 +2,7 @@ 本题先进行一下转换。将每行末尾的零的个数统计一下,得到数组zeros,即zeros[i]表示第i行末尾的零的个数。我们的目标是将zeros通过adjacent swap操作,变成一个数组target,其中target[i]>=n-1-i. 求最小的操作数。 -我们首先考虑target[0],它的要求最高(需要有n-1个零)。我们考察所有的行,看看有哪些满足条件。加入有a和b两行满足条件,即zeros[a]>=n-1,zeros[b]>=n-1,那么我们应该选择将哪一行挪到第0行的位置上来呢?我们不妨举个例子: +我们首先考虑target[0],它的要求最高(需要有n-1个零)。我们考察所有的行,看看有哪些满足条件。假如有a和b两行满足条件,即zeros[a]>=n-1,zeros[b]>=n-1,那么我们应该选择将哪一行挪到第0行的位置上来呢?我们不妨举个例子: ``` X X X a X b X ``` diff --git a/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves/2202.Maximize-the-Topmost-Element-After-K-Moves.cpp b/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves/2202.Maximize-the-Topmost-Element-After-K-Moves.cpp new file mode 100644 index 000000000..a633b13c9 --- /dev/null +++ b/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves/2202.Maximize-the-Topmost-Element-After-K-Moves.cpp @@ -0,0 +1,29 @@ +class Solution { +public: + int maximumTop(vector& nums, int K) + { + int n = nums.size(); + int ret = INT_MIN; + for (int i=1; i<=n; i++) + { + int k = K; + if (k1) + flag = 1; + + if (flag) ret = max(ret, nums[i-1]); + } + return ret==INT_MIN? 
-1:ret; + } +}; diff --git a/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves/Readme.md b/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves/Readme.md new file mode 100644 index 000000000..533bde3bc --- /dev/null +++ b/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves/Readme.md @@ -0,0 +1,15 @@ +### 2202.Maximize-the-Topmost-Element-After-K-Moves + +我们逐个元素考虑,对于第i个元素(以1-index记),它能否被K次操作之后排在队列的首位? + +首先,我们为了将第i个元素放在首位,我们必须把前i-1个元素拿掉。这i-1次的remove操作是可以提前做而不影响最终结果的。注意,push的操作不能提前做,因为必须要先有removed的元素才能使得push操作有意义;反之,remove的操作不依赖于push,可以放心地提前。 + +将k自减i-1后,我们的目标元素就已经在队列首位了。此时假设我们还有k次操作要做,那么就分情况讨论。 + +1. k==0,那么无需操作,现状就是期待的效果 +2. k==1,那么此时我们无论是remove还是push,都无法再让第i个元素出现在队列首位了。 +3. 如果k是偶数,那么显然,我们可以通过反复remove和push队首元素,使得第i个元素始终保持在队列首位。 +4. 如果k是奇数,其实也有办法。那就是先remove当前的两个元素、再push回第i个元素。然后再反复remove和push第i个元素,消耗偶数次的操作。注意,这个方法的前提是存在第i+1个元素。 +5. 类似于4有一种容易被忽略的想法。那就是先remove第i个元素,再依次push会第i-1个元素和第i个元素。然后再反复remove和push第i元素,消耗偶数次的操作。注意这个方法不需要存在第i+1个元素,但需要存在第i-1个元素。 + +以上几种情况能保证第i个元素能通过操作后被放置在队列首位。我们在所有符合条件的元素里找最大值即可。 diff --git a/Greedy/2216.Minimum-Deletions-to-Make-Array-Beautiful/2216.Minimum-Deletions-to-Make-Array-Beautiful.cpp b/Greedy/2216.Minimum-Deletions-to-Make-Array-Beautiful/2216.Minimum-Deletions-to-Make-Array-Beautiful.cpp new file mode 100644 index 000000000..00eab6dba --- /dev/null +++ b/Greedy/2216.Minimum-Deletions-to-Make-Array-Beautiful/2216.Minimum-Deletions-to-Make-Array-Beautiful.cpp @@ -0,0 +1,32 @@ +class Solution { +public: + int minDeletion(vector& nums) + { + int n = nums.size(); + int ret = 0; + + int i = 0; + + while (i& nums, int k) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + + vectorpresum(n); + for (int i=0; idiff(n); + for (int i=0; i& flowers, long long newFlowers, int target, int full, int partial) + { + sort(flowers.begin(), flowers.end()); + + LL ret0 = 0; + while (flowers.size()>0 && flowers.back()>=target) + { + ret0 += full; + flowers.pop_back(); + } + if (flowers.empty()) return ret0; + + LL n = flowers.size(); + vectorpresum(n); + for (LL i=0; idiff(n); + for (LL i=0; i=0; i--) + { + if (newFlowers < 0) break; + + if (presum[i]+newFlowers >= (LL)(target-1)*(i+1)) + { + ret = max(ret, (LL)(target-1)*partial + (LL)(n-1-i)*full); + } + else + { + auto iter = upper_bound(diff.begin(), diff.begin()+i+1, newFlowers); + int k = prev(iter) - diff.begin(); + LL total = presum[k] + newFlowers; + LL each = total / (LL)(k+1); + ret = max(ret, each*partial + (LL)(n-1-i)*full); + } + newFlowers -= target-flowers[i]; + } + + if (newFlowers>=0) + ret = max(ret, n*full); + + return ret + ret0; + } +}; diff --git a/Greedy/2234.Maximum-Total-Beauty-of-the-Gardens/Readme.md b/Greedy/2234.Maximum-Total-Beauty-of-the-Gardens/Readme.md new file mode 100644 index 000000000..2cdc2bd2a --- /dev/null +++ b/Greedy/2234.Maximum-Total-Beauty-of-the-Gardens/Readme.md @@ -0,0 +1,13 @@ +### 2234.Maximum-Total-Beauty-of-the-Gardens + +很明显,我们要将所有的花园分为两部分,一部分是将其变为complete,另一部分是保持incomplete。相比之下,我们肯定是将那些flowers数值已经较大的花园变为complete更为合算,因为能省下更多的种植配额去提升那些incomplete花园的短板。 + +于是我们自然会将flowers数组从小到大排序。我们需要遍历两种花园的分界点i,即编号0到i的花园必须都是incomplete,编号i+1到n-1的花园必须都是complete。对于后者,我们很容易算出所需要额外种植的数目:即忽略已经超过target的花园,将那些未满target的花园补足至target。于是我们将这个需要补足的数目从newFlowers中减去,可以知道剩余的配额,将会用来处理那些认定是incomplete的花园(即编号从0到i的花园)。此外,根据规则,后者部分我们的收益是```(n-i)*full```. + +ok,此时我们的任务就是,在0到i的花园里分配剩余的newFlowers,使得可以将这些花园里的最小数目最大化(因为这部分的收益函数是最小的数目乘以partial)。但是需要注意一点的是,因为我们认定了这些花园是incomplete的,它们注定都不能超过target-1. 所以我们需要考虑第一种情况,如果newFlowers配额非常充裕,我们可以将这些incomplete的花园都补足到target-1,于是可以得分```(target-1)*partial```. 
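This "ample budget" case boils down to a single inequality (a sketch, assuming presum is the prefix sum of the ascending-sorted flowers, as in the solution code above):

```
presum[i] + newFlowers >= (target - 1) * (i + 1)
```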
+ +第二种情况,就是newFlowers配额有限,只能用于增补那些数目最少的花园,以提升短板。此时我们就要考虑将可以将短板提升至多少?所谓提升短板,就是说我们需要确定一个位置p,使得newFlowers可以将编号从0到p的花园都提升到同一个数值。这个p是怎么得到的呢,其实是因为newFlowers不够大,只能将编号0到p的花园提升到同一个数值,但是无法将编号0到p+1的花园提升到同一个数值。所以我们就明确了p的意义,即寻找最大的p,使得```sum[0:p] + newFlowers >= flowers[p]*(p+1)```,或者说```flowers[p]*(p+1) - presum[p] <= newFlowers```。 我们令```diff[p] = flowers[p]*(p+1) - presum[p]```,即表示将前p+1个花园都提升至flowers[p]的数量所需要额外种植的配额,那么diff数值显然是个单调增函数,我们必然可以用二分法确定这个临界位置p。有了这个临界位置p,那么我们就可以用```(presum[p]+newFlowers) / (p+1)```来计算我们将前p+1个花园的短板最大提升至多少。 + +所以本题的时间复杂度是o(NlogN),外层就是遍历incomplete/complete的分界点i,内部就是二分查找短板的范围p。 + +事实上p肯定也是单调从大到小变化的,所以搜索的过程理论上可以优化至o(N),不过因为我们最开始需要对flowers排序,这点优化就没有必要了. diff --git a/Greedy/2242.Maximum-Score-of-a-Node-Sequence/2242.Maximum-Score-of-a-Node-Sequence.cpp b/Greedy/2242.Maximum-Score-of-a-Node-Sequence/2242.Maximum-Score-of-a-Node-Sequence.cpp new file mode 100644 index 000000000..e8960fbb2 --- /dev/null +++ b/Greedy/2242.Maximum-Score-of-a-Node-Sequence/2242.Maximum-Score-of-a-Node-Sequence.cpp @@ -0,0 +1,37 @@ +class Solution { + vector> nxt[50000]; +public: + int maximumScore(vector& scores, vector>& edges) + { + int n = scores.size(); + for (auto edge: edges) + { + int a= edge[0], b=edge[1]; + nxt[a].push_back({scores[b],b}); + nxt[b].push_back({scores[a],a}); + } + for (int i=0; i 3) + nxt[i].resize(3); + } + + int ret = -1; + + for (auto edge: edges) + { + int a= edge[0], b=edge[1]; + for (auto& [_, i] : nxt[a]) + for (auto & [_, j] : nxt[b]) + { + if (i==j) continue; + if (i==b || j==a) continue; + ret = max(ret, scores[i]+scores[a]+scores[b]+scores[j]); + break; + } + } + + return ret; + } +}; diff --git a/Greedy/2242.Maximum-Score-of-a-Node-Sequence/Readme.md b/Greedy/2242.Maximum-Score-of-a-Node-Sequence/Readme.md new file mode 100644 index 000000000..8b68f67a4 --- /dev/null +++ b/Greedy/2242.Maximum-Score-of-a-Node-Sequence/Readme.md @@ -0,0 +1,9 @@ +### 2242.Maximum-Score-of-a-Node-Sequence + +因为题目需要找的序列只有四个节点、三条边,所以我们可以通过穷举中间的边,来暴力枚举所有的节点组合。 + +比如,如果我们考察以(a,b)为中间的边,那么我们接下来就要找a的一个邻接节点i,b的一个邻接节点j,组成i-a-b-j的序列。为了使得节点的score总和最大,原则上贪心地找a的最大邻居、b的最大邻居。但是容易想到会有一些特殊情况,比如a的最大邻居恰好是b或者j的话,我们就无法保证这个序列的四个节点是互异的。同理,b的最大邻居也有这样的问题。 + +那么我们是否该枚举所有a的邻居与b的邻居的组合,考察所有可能的{i,j}再取最大值吗?其实不必。根据之前的分析,我们只需要考察a的最大的三个邻居即可,这样就一定能找到一个不与b和j重复的。同理,我们也只保留b的最大的三个邻居。这样,最多9组配对,必然可以找到一组互异的i-a-b-j,我们取其中的最大值即可。 + +本题的时间复杂度就是```o(ElogE + 9*E)``` diff --git a/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point/2250.Count-Number-of-Rectangles-Containing-Each-Point.cpp b/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point/2250.Count-Number-of-Rectangles-Containing-Each-Point.cpp new file mode 100644 index 000000000..b78c701ba --- /dev/null +++ b/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point/2250.Count-Number-of-Rectangles-Containing-Each-Point.cpp @@ -0,0 +1,28 @@ +class Solution { +public: + vector countRectangles(vector>& rectangles, vector>& points) + { + sort(rectangles.begin(), rectangles.end()); + for (int i=0; icount(101); + vectorrets(points.size()); + for (int i=points.size()-1; i>=0; i--) + { + while (j>=0 && rectangles[j][0]>=points[i][0]) + { + count[rectangles[j][1]]++; + j--; + } + int total = 0; + for (int h=100; h>=points[i][1]; h--) + total+=count[h]; + rets[points[i][2]] = total; + } + + return rets; + } +}; diff --git a/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point/Readme.md b/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point/Readme.md new file mode 100644 index 000000000..21befe316 --- /dev/null +++ 
b/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point/Readme.md @@ -0,0 +1,9 @@ +### 2250.Count-Number-of-Rectangles-Containing-Each-Point + +对于矩阵而言,它有宽和高两个属性。和1996类似的思想,我们必然会将其中一个属性排序,方便我们定位,然后再解决另一个属性。 + +如果我们将所有的矩阵按照宽度排序之后,对于任何一个点P(x,y),我们就可以很容易定位到哪些矩阵在宽度方向是可以包括该点的(也就是所有宽度大于等于y的矩阵),我们把这些矩阵放入一个pool中。但是在高度方向上,pool里面的这些矩阵的高度值是参差不齐的,无法快速定位有多少矩阵的高度大于等y。有人会说把这些矩阵的高度值放入一个有序的容器中,但是常规的红黑树虽然支持二分定位这些高度值里第一个大于等于y的位置,但是它无法告诉大于等于y的值总共有多少个。 + +此时最关键的地方就是题目给的数据范围。我们发现矩阵的高度值的范围只有100,因此我们将1到100每个刻度作为buckets来处理,即每个bucket存储有多少个对应高度值的矩阵。于是只需要最多遍历100次,就可以知道pool里矩阵的高度值有多少是大于等于y的了。 + +综上,我们按照横坐标从大到小处理每个点。对于任意一个点(x,y),我们就可以把若干个符合宽度条件x>=x的矩形加入pool中,统计高度值的分布。再遍历100次,计算有多少高度值是大于等于y的。所以总的时间复杂度就是```o(rectangles)*100```. diff --git a/Greedy/2257.Count-Unguarded-Cells-in-the-Grid/2257.Count-Unguarded-Cells-in-the-Grid.cpp b/Greedy/2257.Count-Unguarded-Cells-in-the-Grid/2257.Count-Unguarded-Cells-in-the-Grid.cpp new file mode 100644 index 000000000..ae14994d1 --- /dev/null +++ b/Greedy/2257.Count-Unguarded-Cells-in-the-Grid/2257.Count-Unguarded-Cells-in-the-Grid.cpp @@ -0,0 +1,34 @@ +class Solution { +public: + int countUnguarded(int m, int n, vector>& guards, vector>& walls) + { + vector>matrix(m, vector(n)); + for (auto guard: guards) + matrix[guard[0]][guard[1]] = 2; + for (auto wall: walls) + matrix[wall[0]][wall[1]] = 2; + + vector>dir({{1,0},{-1,0},{0,1},{0,-1}}); + for (auto guard: guards) + for (auto [dx, dy]: dir) + { + int i = guard[0], j = guard[1]; + while (1) + { + i+=dx; + j+=dy; + if (i<0||i>=m || j<0||j>=n) break; + if (matrix[i][j]==2) break; + matrix[i][j] = 1; + } + } + + int ret = 0; + for (int i=0; ipos(26, -1); + long long result=0; + for (int i=0; ilast(26, -1); + + long long ret = 0; + for (int i=0; i& nums) { + int ret = solve(nums); + reverse(nums.begin(),nums.end()); + ret = min(ret, solve(nums)); + return ret; + } + + int solve(vector& nums) { + int res = 0; + priority_queue que; + for(auto num: nums) { + if(!que.empty() && que.top()>num) { + res += que.top() - num; + que.pop(); + que.push(num); + } + que.push(num); + } + return res; + } +}; diff --git a/Greedy/2263.Make-Array-Non-decreasing-or-Non-increasing/Readme.md b/Greedy/2263.Make-Array-Non-decreasing-or-Non-increasing/Readme.md new file mode 100644 index 000000000..40827c503 --- /dev/null +++ b/Greedy/2263.Make-Array-Non-decreasing-or-Non-increasing/Readme.md @@ -0,0 +1,27 @@ +### 2263.Make-Array-Non-decreasing-or-Non-increasing + +此题有nlogn的贪心解法,但理解起来比较有难度。我们通过例子来解释。我们先考虑如何实现一个非递减序列。 + +假设数组前三个元素已经是```2,6,8```,那么我们必须不需要任何操作。 + +如果第四个元素是5,那么我们要让最后两个元素“非递减”,无论如何至少要消耗三次操作。比如将第四个元素升为8。此外还有其他的操作,比如将最后两个元素都变成6,将最后两个元素都变成7,等等。这些操作的代价都相同,即都是3. 在这里,为了在后续更容易地构造“非递减”序列,我们这就保守地将第三和第四个元素都变成5(同样花费3个代价)。此时我们将序列写作 +``` +idx 1, 2, (3, 4) +val 2, 6, 5 +``` +注意,这里我们将第三个元素和第四个元素“捆绑”在一起,让它们今后“共进退”,也就是永远取相同的值,这样能够让后面的元素更容易地“递增”地接上去。 + +此时会说,这样的序列并不是“非递减”的呀。没关系,之前我们分析过(3,4)这两个元素,我们可以让它们都是5,也可以让它们都是6,也可以让它们都是7或者8,这些方案都不改变之前付出的代价3,我们称之为“弹性范围”. 我们现在只是标注了弹性范围的下限,而它们其实可以在不增加消耗地前提下,匹配它们前面的任意元素的值,比如说6,就可以与第二个元素相接了成为“非递增”了. 事实上,无论如何,(3,4)这两个元素一定可以“无代价”地与之前的元素成功相接:这是因为(3,4)两个元素弹性变化范围的上限是8,而8是之前序列里的最大元素,6只不过是次大元素,所以定会在(3,4)两个元素的弹性变化范围内。 + +我们继续这个例子,假如第五个元素是4。此时数组里的最大元素是第二个的6,所以我们必然想把4提升到6,或者把6降到4. 
所以我们至少需要2个代价。换而言之,我们用2个代价,可以将(2,5)这两个元素在4,5,6之间弹性变化。我们记做 +``` +idx 1, (3, 4), (2, 5) +val 2, 5, 4 +``` +同样,我们虽然标记了4,但是因为(2,5)这两个元素的弹性范围是4~6,它必然可以在需要的时候与之前的(3,4)匹配(因为5是之前的次大值,小于6)。所以至此,我们用了3+2=5个必须付出的代价,但理论上可以保证前5个元素一定可以调整为“非递减”序列。 + +这里再说明一下,为什么我们总标记弹性区间的下限呢?当然是为了贪心地好让后面的元素以更小的代价接上来呀。 + +以后再遇到新的数字,步骤就与之前完全一致了。如果新元素比val数组里的所有元素都大,那么索性无代价地加入(反正以后还有机会削减)。如果新元素比val数组里的最大值小,那么我们就特意将最大值的元素与新元素“捆绑”处理(约定相同的值),并可以得到一个弹性区间(弹性区间内的代价不变)。区间的下限就是新元素的值,上限就是刚才的最大值。注意我们将捆绑后的元素放入val数组里的时候,只标记下限。此时我们一定有结论:最新捆绑的元素一定可以变换到某个数值,能与之前的元素拼接成“非递减”序列。 + +将以上的思路整理一下,程序只需要一个优先队列即可实现。 diff --git a/Greedy/2271.Maximum-White-Tiles-Covered-by-a-Carpet/2271.Maximum-White-Tiles-Covered-by-a-Carpet.cpp b/Greedy/2271.Maximum-White-Tiles-Covered-by-a-Carpet/2271.Maximum-White-Tiles-Covered-by-a-Carpet.cpp new file mode 100644 index 000000000..79f61561c --- /dev/null +++ b/Greedy/2271.Maximum-White-Tiles-Covered-by-a-Carpet/2271.Maximum-White-Tiles-Covered-by-a-Carpet.cpp @@ -0,0 +1,29 @@ +class Solution { + vectorpresum; +public: + int maximumWhiteTiles(vector>& tiles, int carpetLen) + { + sort(tiles.begin(), tiles.end()); + int n = tiles.size(); + presum.resize(n); + for (int i=0; i= tiles[j][1]) + j++; + int len = 0; + if (j>i) + len += presum[j-1] - (i==0?0:presum[i-1]); + if (j& candidates) + { + int ret = 0; + for (int i=0; i<31; i++) + { + int count = 0; + for (int x: candidates) + { + if ((x>>i)&1) + count++; + } + ret = max(ret, count); + } + return ret; + } +}; diff --git a/Greedy/2275.Largest-Combination-With-Bitwise-AND-Greater-Than-Zero/Readme.md b/Greedy/2275.Largest-Combination-With-Bitwise-AND-Greater-Than-Zero/Readme.md new file mode 100644 index 000000000..56f6d44c1 --- /dev/null +++ b/Greedy/2275.Largest-Combination-With-Bitwise-AND-Greater-Than-Zero/Readme.md @@ -0,0 +1,5 @@ +### 2275.Largest-Combination-With-Bitwise-AND-Greater-Than-Zero + +将若干个数Bitwise AND之后的结果S如果不为零,说明S至少有一个bit位不为零,也就是说所有的数在该bit位上不能有0存在。于是我们可以检查每个bit,统计有多少元素在该bit位上非零。假设有M个元素在某个二进制位上都是1,那么他们的AND结果必然就不是零。 + +显然,对于32bit的整形,我们检查每个位置之后,可以找到这样一个最大的M。但M是否是最终的答案呢,有没有可能更多呢?答案是否定的。如果有M+1个元素的AND结果非零,必然有一个bit位上该M+1个元素都非零。这和之前的假设“M是所有bit位上我们找到的非零元素最多的那个”相矛盾。 diff --git a/Greedy/2306.Naming-a-Company/2306.Naming-a-Company.cpp b/Greedy/2306.Naming-a-Company/2306.Naming-a-Company.cpp new file mode 100644 index 000000000..181739b70 --- /dev/null +++ b/Greedy/2306.Naming-a-Company/2306.Naming-a-Company.cpp @@ -0,0 +1,25 @@ +using LL = long long; +class Solution { +public: + long long distinctNames(vector& ideas) + { + vector>head2str(26); + for (string& idea: ideas) + head2str[idea[0]-'a'].insert(idea.substr(1)); + + LL ret = 0; + for (int i=0; i<26; i++) + for (int j=i+1; j<26; j++) + { + int dup = 0; + for (string x: head2str[i]) + if (head2str[j].find(x)!=head2str[j].end()) + dup++; + LL a = head2str[i].size() - dup; + LL b = head2str[j].size() - dup; + ret += a*b*2; + } + + return ret; + } +}; diff --git a/Greedy/2306.Naming-a-Company/Readme.md b/Greedy/2306.Naming-a-Company/Readme.md new file mode 100644 index 000000000..2eb5a1429 --- /dev/null +++ b/Greedy/2306.Naming-a-Company/Readme.md @@ -0,0 +1,9 @@ +### 2306.Naming-a-Company + +我们令{a}表示以字母a为首字母的后缀字符串的集合。同理有{b},{c}, ... 
+ +根据题意,我们会将任意一个名字分成两部分看待:aA。前者是首字母,后者是除首字母外的后缀字符串。我们考虑任意两个名字aA和bB是否能配对呢?根据规则,aA + bB => aB + bA。 + +为了符合条件,aB不能出现在原始字符串中。也就是说,B不能出现在{a}里。类似的,bA不能出现在元素字符串中,即A不能出现在{b}里。所以想要aA和bB配对成功,{a}集合与{b}集合里面的相同元素都不能出现。而将这些元素从两个集合中都拿走后,{a}与{b}的元素就可以任意选取,都能保证 aA + bB => aB + bA 符合规则。 + +综上,我们用二层循环,考察不同的首字母组合,假设分别是x和y,且{x}有m个元素,{y}有n个元素,两个集合的共同元素是k个。那么就有```(m-k)*(n-k)*2```种符合规则的配对。最终将26x26层循环得到的结果相加。 diff --git a/Greedy/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K_v1.cpp b/Greedy/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K_v1.cpp new file mode 100644 index 000000000..23777ab2d --- /dev/null +++ b/Greedy/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K_v1.cpp @@ -0,0 +1,63 @@ +class Solution { +public: + int longestSubsequence(string s, int k) + { + string t; + while (k>0) + { + if (k%2==0) + t.push_back('0'); + else + t.push_back('1'); + k/=2; + } + reverse(t.begin(), t.end()); + + int m = s.size(); + int n = t.size(); + + if (m=0; i--) + { + if (check(s,i,t, 0)) + { + ret = max(ret, countZeros(s, i) + n); + break; + } + } + + return ret ; + } + + int countZeros(string&s, int k) + { + int count = 0; + for (int i=0; i= (int)t.size() - j; + } + else + { + while (i0) + { + if (k%2==0) + t.push_back('0'); + else + t.push_back('1'); + k/=2; + } + reverse(t.begin(), t.end()); + + int m = s.size(); + int n = t.size(); + + if (m& buses, vector& passengers, int capacity) + { + int m = passengers.size(); + + sort(buses.begin(), buses.end()); + sort(passengers.begin(), passengers.end()); + + int j = 0; + int ret = -1; + + for (int i=0; i0) + { + cap--; + if (j>=1 && passengers[j]-1 != passengers[j-1]) + ret = passengers[j]-1; + else if (j==0) + ret = passengers[j]-1; + j++; + } + + if (cap > 0) + { + if (j>=1 && passengers[j-1]!=buses[i]) + ret = buses[i]; + else if (j==0) + ret = buses[i]; + } + } + + return ret; + } +}; diff --git a/Greedy/2332.The-Latest-Time-to-Catch-a-Bus/Readme.md b/Greedy/2332.The-Latest-Time-to-Catch-a-Bus/Readme.md new file mode 100644 index 000000000..6e3e02b8a --- /dev/null +++ b/Greedy/2332.The-Latest-Time-to-Catch-a-Bus/Readme.md @@ -0,0 +1,24 @@ +### 2332.The-Latest-Time-to-Catch-a-Bus + +我们想象,如果“我”不存在,只有已知的那些passengers,那么我们应该很容易这道哪些乘客上哪些车。而现在想做的基本方针,是看看能不能比某个乘客早一秒到达,这样就把他的位置挤占了(当然还有其他约束条件),从而搭上这班车。 + +我们先来写代码,看看当前这些passengers与车的匹配。这是一个明显的双指针 +```cpp + int j = 0; + for (int i=0; i0) + { + // passengers[j]可以乘坐buses[i] + cap--; + j++; + } + } +``` + +那么我们就考察,如果我们比passengers[j]早到一秒钟,能不能挤掉它的位置坐上buses[i]呢?显然,只要```passengers[j]-1!=passengers[j-1]```,我们就可以实现。 + +那么还有没有其他不需要挤占其他乘客的可能性呢?其实buses[i]不一定都坐满了。如果最后一个上车的乘客与班车开动的时刻之间有空隙,那么我们就直接在buses[i]时刻上车即可。 + +依据上面的算法,我们遍历每辆车,考察该车的每个乘客能否被挤占,或者是否可以卡点上车,就可以确定自己上车的时间。 diff --git a/Greedy/2333.Minimum-Sum-of-Squared-Difference/2333.Minimum-Sum-of-Squared-Difference.cpp b/Greedy/2333.Minimum-Sum-of-Squared-Difference/2333.Minimum-Sum-of-Squared-Difference.cpp new file mode 100644 index 000000000..16391a0ba --- /dev/null +++ b/Greedy/2333.Minimum-Sum-of-Squared-Difference/2333.Minimum-Sum-of-Squared-Difference.cpp @@ -0,0 +1,42 @@ +using LL = long long; +class Solution { +public: + long long minSumSquareDiff(vector& nums1, vector& nums2, int k1, int k2) + { + vectornums; + for (int i=0; ipresum(n); + presum[0] = nums[0]; + for (int i=1; i smallestTrimmedNumbers(vector& nums, vector>& queries) + { + int m = nums.size(), n = nums[0].size(); + vector>ans(n+1, 
vector(m)); + + for (int j=0; j>buckets(10); + + for (int j=0; jrets; + for (auto q: queries) + { + rets.push_back(ans[q[1]][q[0]-1]); + } + return rets; + } +}; diff --git a/Greedy/2343.Query-Kth-Smallest-Trimmed-Number/Readme.md b/Greedy/2343.Query-Kth-Smallest-Trimmed-Number/Readme.md new file mode 100644 index 000000000..3cb9c6995 --- /dev/null +++ b/Greedy/2343.Query-Kth-Smallest-Trimmed-Number/Readme.md @@ -0,0 +1,13 @@ +### 2343.Query-Kth-Smallest-Trimmed-Number + +本题的数据规模非常小,对每个query进行暴力处理也是可以过的。 + +这里介绍一个比较优秀的“基数排序”的算法。我们令ans[i][j]表示只保留i位数字时,排列第j小的元素的原始index。 + +我们先考虑只保留一位数字的情况。假设此时的元素排序后是这些:0(x),0(x),1(x),1(x),2(x),3(x),3(x),3(x),4(x),4(x)...其中括号里面的是对应的index,具体数字不重要,他们构成了ans[1][j]. + +然后我们考虑只保留两位数字的情况。这些元素的第二位数字只有10种可能0~9,并且我们知道第二位数字是排序的primary key。这里我们就有一个技巧能够加快排序。我们构造10个bucket。我们将第一轮有序的元素按照他们各自的第二位数字,按照先后顺序扔进不同的bucket里面。此时就有一个非常好的性质:不同的bucket之间的元素必然是有序的(bucket小靠前);同一个bucket内的元素也是有序的(先扔进去的是第一轮里的较小元素,必然会靠前)。此时我们相当于只用了线性的时间就实现了所有元素的排序。我们此时将所有元素按照bucket的顺序和bucket内部的先后顺序访问一遍,就得到了ans[2][j]. + +以此类推,我们可以实现保留三位数字的元素的排序ans[3][j],等等。 + +最终我们对于每个query,我们返回的其实就是ans[trim][k-1]. diff --git a/Greedy/2345.Finding-the-Number-of-Visible-Mountains/2345.Finding-the-Number-of-Visible-Mountains.cpp b/Greedy/2345.Finding-the-Number-of-Visible-Mountains/2345.Finding-the-Number-of-Visible-Mountains.cpp new file mode 100644 index 000000000..b913c51eb --- /dev/null +++ b/Greedy/2345.Finding-the-Number-of-Visible-Mountains/2345.Finding-the-Number-of-Visible-Mountains.cpp @@ -0,0 +1,28 @@ +class Solution { +public: + int visibleMountains(vector>& peaks) + { + sort(peaks.begin(), peaks.end(), [](vector&a, vector&b){ + int l1=a[0]-a[1], r1=a[0]+a[1]; + int l2=b[0]-b[1], r2=b[0]+b[1]; + if (l1!=l2) return l1r2; + }); + + int n = peaks.size(); + int rightMost = -1; + int ret = 0; + for (int i=0; i0 && peaks[i]==peaks[i-1]) continue; + if (peaks[i][0]+peaks[i][1] > rightMost) + { + rightMost = peaks[i][0]+peaks[i][1]; + if (i==n-1 || peaks[i]!=peaks[i+1]) + ret++; + } + } + + return ret; + } +}; diff --git a/Greedy/2345.Finding-the-Number-of-Visible-Mountains/Readme.md b/Greedy/2345.Finding-the-Number-of-Visible-Mountains/Readme.md new file mode 100644 index 000000000..72a6d08e9 --- /dev/null +++ b/Greedy/2345.Finding-the-Number-of-Visible-Mountains/Readme.md @@ -0,0 +1,9 @@ +### 2345.Finding-the-Number-of-Visible-Mountains + +本题的突破点在于这个发现:如果三角形A的左端点早于三角形B的左端点,那么A一定不会被B覆盖。所以将所有的三角形按照左端点排序,那我们能看到的三角形的顺序一定不会违反这个序列。 + +接下来思考,虽然A不会被B覆盖,但是B依然可能会被A覆盖。如何判定呢?其实就取决于A的右端点是否足够远。如果A的右端点足够远,那么它有可能还会覆盖后续的若干个三角形。 + +所以基本思想就是,将所有三角形按照左端点排序,遍历每个三角形的时候维护当前最远的右端点的位置far。任何新的三角形的右端点的位置如果在far前面,就说明它是会被前面的三角形所覆盖的。 + +本题的corner case是,如果有两个三角形的左端点相同,那如何排序?不难想到,我们先处理右端点更远的,这样就保证它能把其他的三角形给遮盖了。 diff --git a/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls/2350.Shortest-Impossible-Sequence-of-Rolls.cpp b/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls/2350.Shortest-Impossible-Sequence-of-Rolls.cpp new file mode 100644 index 000000000..af9906659 --- /dev/null +++ b/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls/2350.Shortest-Impossible-Sequence-of-Rolls.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int shortestSequence(vector& rolls, int k) + { + int n = rolls.size(); + unordered_setSet; + + int ret = 0; + for (int i=n-1; i>=0; i--) + { + Set.insert(rolls[i]); + if (Set.size()==k) + { + ret++; + Set.clear(); + } + } + return ret+1; + + } +}; diff --git a/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls/Readme.md b/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls/Readme.md new file mode 100644 index 000000000..ef1907cf5 --- 
/dev/null +++ b/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls/Readme.md @@ -0,0 +1,9 @@ +### 2350.Shortest-Impossible-Sequence-of-Rolls + +我们考虑长度为1是否满足。我们势必会想找到一个最短的区间[0:m],使得rolls[0:m]里面必然包括了1到k。这意味着仅在这个前缀里,我们可以构造任意的、长度为1的序列。如果找不到这样的m,自然本题就终止了。此时我们关注m这个位置,不难得知,rolls[m]一定是在这个区间里唯一出现的一个元素,不妨记做x。 + +此时我们考虑长度为2的情况。因为x是[0:m]中最“稀有”的元素(最晚出现、仅出现了一次),这意味着在所有的长度为2的序列中,以x打头的序列最不容易找到。根据前述,我们必须舍弃掉前m-1个元素,直到第m个元素我们才找到了x。于是我们就只需要考虑构造“x*”这种序列,为了能让`*`可以是任意元素,我们必须在m之后再找一段区间,记做[m+1,n],使得其中包含了1到k。如果能找到这样的n,那么意味着我们在[0:n]的前缀里可以构造任意的、长度为2的序列。类似地,我们需要注意到rolls[n]一定是[m+1:n]这个区间里唯一出现的一个元素,不妨记做y。 + +同理,我们在考虑长度为3的情况时,`xy*`是最难构造的。因为最短只有在[0:n]的前缀里才出现`xy`。所以接下来的任务就是从n+1开始找一段区间,需要包含1到k的所有元素... + +依次类推,我们发现本题的本质就是从rolls的起点,不停地寻找一段区间,使其恰好包含了1到k所有的元素。能连续找到多少个这样的区间,就意味着我们可以构造多长的序列,使得序列里的每一个元素都有可能是1到k。 diff --git a/Greedy/2365.Task-Scheduler-II/2365.Task-Scheduler-II.cpp b/Greedy/2365.Task-Scheduler-II/2365.Task-Scheduler-II.cpp new file mode 100644 index 000000000..866371460 --- /dev/null +++ b/Greedy/2365.Task-Scheduler-II/2365.Task-Scheduler-II.cpp @@ -0,0 +1,18 @@ +using LL = long long; +class Solution { +public: + long long taskSchedulerII(vector& tasks, int space) + { + int n = tasks.size(); + unordered_mapMap; + LL cur = 0; + for (int i=0; i& nums) + { + LL ret = 0; + for (int i = nums.size()-2; i>=0; i--) + { + LL x = nums[i+1]; + LL y = nums[i]; + if (y<=x) continue; + + LL k = y/x; + LL d = y%x; + if (d==0) + { + ret += k-1; + nums[i] = x; + continue; + } + + // d + k*p <= x - p + LL p = (x-d) / (k+1); + + LL x2 = x - p; + LL d2 = d + k*p; + + if (d2 < x2) + { + nums[i] = x2-1; + } + else + { + nums[i] = d2; + } + ret += k; + } + + return ret; + } +}; diff --git a/Greedy/2366.Minimum-Replacements-to-Sort-the-Array/2366.Minimum-Replacements-to-Sort-the-Array_v2.cpp b/Greedy/2366.Minimum-Replacements-to-Sort-the-Array/2366.Minimum-Replacements-to-Sort-the-Array_v2.cpp new file mode 100644 index 000000000..e68e842b5 --- /dev/null +++ b/Greedy/2366.Minimum-Replacements-to-Sort-the-Array/2366.Minimum-Replacements-to-Sort-the-Array_v2.cpp @@ -0,0 +1,28 @@ +using LL = long long; +class Solution { +public: + long long minimumReplacement(vector& nums) + { + LL ret = 0; + for (int i = nums.size()-2; i>=0; i--) + { + LL x = nums[i+1]; + LL y = nums[i]; + if (y<=x) continue; + + if (y%x==0) + { + ret += y/x-1; + nums[i] = x; + } + else + { + int k = y/x+1; + ret += y/x; + nums[i] = y/k; + } + } + + return ret; + } +}; diff --git a/Greedy/2366.Minimum-Replacements-to-Sort-the-Array/Readme.md b/Greedy/2366.Minimum-Replacements-to-Sort-the-Array/Readme.md new file mode 100644 index 000000000..040c70502 --- /dev/null +++ b/Greedy/2366.Minimum-Replacements-to-Sort-the-Array/Readme.md @@ -0,0 +1,28 @@ +### 2366.Minimum-Replacements-to-Sort-the-Array + +#### 解法1 + +我们从后往前看,对于最后一个数,我们肯定不会拆分。一旦将其拆分的话变小的话,那么前面的数就有更大的概率需要拆得更小。 + +接着假设最后一个数是x,倒数第二个数是y。如果y小于等于x,那么最后两个元素已经是递增关系,y就不用拆分了,理由同上。如果y大于x,那么就必须拆分y,那么怎么拆分呢? + +根据规则,我们想要尽量少地拆分,又不能拆出大于x的数(否则破坏递增),不难通过贪心的思想,知道我们需要尽量拆出完整的x来。假设y除以x的商是k,余数是d,那么我们有一个初始方案:就是拆成一个d,加上k个x。这一定保证了拆分的数目最少。但是d太小的话,会影响左侧元素迫使他们拆分地更细。所以我们试图在不改变这k+1拆份的前提下,尽量地抬升d。怎么抬升呢?显然是由这k个x来提供帮助。如果这k个x集体减少一,那么d就能抬升k。为什么这k个x需要集体行动呢?因为人多力量大啊,让某些x不出力的话留着也没有啥用,不如提供给d来加速d的抬升。 + +那么我们将d抬升多少呢?注意提升后的d不能比下降后的x高。因为我们想抬升d是因为d是当前y拆分出来的最小值,是制约前面元素拆分的“瓶颈”。所以最理想的情况是将y恰好拆分成均匀的k份。如果不行,那么我们就将d抬升至小于等于降低后的x。令所有的x降低p,则有比等式 +``` +d + p*k <= x - p +``` +这样得到 +``` +p <= (x-d) / (k+1) +``` +意思是p不能再大了,再大的话d要反超x了。 + +这样操作之后,原本的1个d和k个x,变成了1个d2和k个x2,其中`d2 = d + p`,`x2 = x - p`,且`d2 <= x2`. 
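A quick worked example of the formulas above: suppose nums[i] = 10 must not exceed nums[i+1] = 4.

```
y = 10, x = 4                     ->  k = y/x = 2, d = y%x = 2, initial split 10 = 2 + 4 + 4
p <= (x-d)/(k+1) = (4-2)/3 = 0    ->  p = 0, hence x2 = 4, d2 = 2, split still 2 + 4 + 4
```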
+ +此时这是不是最优的操作呢?并不是。如果`d2 < x2`,其实我们可以将k个x2里面的一部分(而不是整体)拿出1来再贡献给d2,必然可以使得d2再拉至于x2-1平齐的高度。这是因为之前我们知道,如果k个x2每人都再贡献1出来,会导致`d2`会比`x2-1`还大。所以这意味着,如果贡献出部分的1出来,就能让`d2`与`x2-1`持平。在这种情况下,`x2-1`就是拆分出来的k+1份里的最小值。 + +于是,这个回合结束我们将nums[i]赋值为`x2`(如果`d2==x2`)或者`x2-1`(如果`d2prev(26, 0); + vectordp(n+1,1); + dp[0] = 0; + + int ret = 0; + for (int i=1; i<=n; i++) + { + for (int j=max(0,s[i]-'a'-k); j<=min(25, s[i]-'a'+k); j++) + { + int p = prev[j]; + dp[i] = max(dp[i], dp[p]+1); + } + prev[s[i]-'a'] = i; + ret = max(ret, dp[i]); + } + + return ret; + } +}; diff --git a/Greedy/2370.Longest-Ideal-Subsequence/2370.Longest-Ideal-Subsequence_v2.cpp b/Greedy/2370.Longest-Ideal-Subsequence/2370.Longest-Ideal-Subsequence_v2.cpp new file mode 100644 index 000000000..91fb684f6 --- /dev/null +++ b/Greedy/2370.Longest-Ideal-Subsequence/2370.Longest-Ideal-Subsequence_v2.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int longestIdealString(string s, int k) + { + int n = s.size(); + vectordp(26, 0); + + int ret = 0; + for (int i=0; i> minScore(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + vector>arr; + for (int i=0; irows(m,0); + vectorcols(n,0); + + for (int i=0; iarr; + + for (int i=0; i=mx+1; k--) + arr.push_back(k); + + mx = mx+count; + + i = j-1; + } + + string ret; + for (int i=0; i>& transactions) + { + sort(transactions.begin(), transactions.end(), [](vector&a, vector&b){return a[1]& nums, vector& target) + { + vectorodd1, odd2, even1, even2; + for (auto x: nums) + { + if (x%2==0) + even1.push_back(x); + else + odd1.push_back(x); + } + for (auto x: target) + { + if (x%2==0) + even2.push_back(x); + else + odd2.push_back(x); + } + + return helper(even1, even2) + helper(odd1, odd2); + } + + LL helper(vector&nums, vector&target) + { + sort(target.begin(), target.end()); + sort(nums.begin(), nums.end()); + + LL count = 0; + for (int i=0; i target[i]) + count += (nums[i]-target[i])/2; + + return count; + } + +}; diff --git a/Greedy/2449.Minimum-Number-of-Operations-to-Make-Arrays-Similar/Readme.md b/Greedy/2449.Minimum-Number-of-Operations-to-Make-Arrays-Similar/Readme.md new file mode 100644 index 000000000..63a124368 --- /dev/null +++ b/Greedy/2449.Minimum-Number-of-Operations-to-Make-Arrays-Similar/Readme.md @@ -0,0 +1,13 @@ +### 2449.Minimum-Number-of-Operations-to-Make-Arrays-Similar + +很显然题目的意思是,nums经过一系列操作之后,需要变成targets。于是nums和targets的数组元素之和必然相等,否则无法实现+2/-2的守恒。 + +另外,我们发现,偶数无论如何也无法操作成奇数,反之亦然。所以知道,需要将奇数偶数分开处理,即nums里的奇数需要多少操作转化为targets里的奇数,同理nums里的偶数需要多少操作否转化为targets里的偶数。 + +接下来,我们考虑只含有奇数的nums数组和只含有奇数的targets数组。很明显,我们必然会把nums[0]转化为targets[0],将nums[1]转化为targets[1],依次类推。这样能使得每对元素差的绝对值之和最小。简单的证明可以从两对开始研究。假设有`nums[i]0) + { + sum += x%10; + x/=10; + } + return sum; + } + + long long makeIntegerBeautiful(long long n, int target) + { + string ret; + int carry = 0; + while (n > 0) + { + if (DigitSum(n) <= target) break; + + int cur = n%10; + int d; + if (cur != 0) + { + ret.push_back('0' + (10-cur)); + carry = 1; + } + else + { + ret.push_back('0'); + carry = 0; + } + + n = n/10 + carry; + } + + if(ret.empty()) return 0; + reverse(ret.begin(), ret.end()); + return stoll(ret); + } +}; diff --git a/Greedy/2457.Minimum-Addition-to-Make-Integer-Beautiful/Readme.md b/Greedy/2457.Minimum-Addition-to-Make-Integer-Beautiful/Readme.md new file mode 100644 index 000000000..03ec1ea1a --- /dev/null +++ b/Greedy/2457.Minimum-Addition-to-Make-Integer-Beautiful/Readme.md @@ -0,0 +1,5 @@ +### 2457.Minimum-Addition-to-Make-Integer-Beautiful + +很明显,想要以最小的代价来降低digit 
sum,必然是从低位往高位,逐个加上一个“互补”的数字,使得将该位“清零”。即原数的某位上是2的话,你必然补上8,使得digit sum能够降低2. + +这里特别需要注意的是进位。例如原数是232,你补上一个8之后,你下一个考虑的十位数其实是4而不是3. diff --git a/Greedy/2459.Sort-Array-by-Moving-Items-to-Empty-Space/2459.Sort-Array-by-Moving-Items-to-Empty-Space.cpp b/Greedy/2459.Sort-Array-by-Moving-Items-to-Empty-Space/2459.Sort-Array-by-Moving-Items-to-Empty-Space.cpp new file mode 100644 index 000000000..b81483211 --- /dev/null +++ b/Greedy/2459.Sort-Array-by-Moving-Items-to-Empty-Space/2459.Sort-Array-by-Moving-Items-to-Empty-Space.cpp @@ -0,0 +1,28 @@ +class Solution { +public: + int sortArray(vector& nums) + { + int ret1 = helper(nums); + nums.insert(nums.begin(), nums.back()); + nums.pop_back(); + int ret2 = helper(nums); + return min(ret1, ret2); + } + + int helper(vectornums) + { + int n = nums.size(); + int count = 0; + for (int i=0; inums2(nums.begin(), nums.end()-1); + nums2.insert(nums2.begin(), nums.back()); + return min(helper(nums), helper(nums2)); +``` + +其次,我们考虑如何实现上述的helper函数,即将nums里的元素排序为0,1,2,...,n-1. + +我们容易想到贪心的策略:如果当前的0在位置i且i!=0,那么我们必然将i拿过来交换一次使之归位。然后继续看此时0所在的位置j,如果仍然j!=0,那么同样必然把j拿过来使之归位。接着继续查看0,直至0到了队首。我们发现,可以用k次操作,将k+1个元素(包括0)归到正确的位置。这k+1个元素本质上val-index构成了一个环。 + +其实有另外一种等效的归位操作,我们称之为index sort:只盯着index=0上的数i,如果它不是0,那么就把它与nums[i]交换使得i归位;再看此时index=0上的数j,如果它还不是0,继续将其与nums[j]交换使得j归位。直至index=0上的数变成0. 同样的结论:如果交换了k次,意味着我们已经将k+1个元素(包括0)归到正确的位置。 + +接下来我们看其他的位置。如果某个位置i上的数字不是i,我们同样用上面的方法,通过若干次交换使得位置i上得到i。例如: +``` +idx: 1 2 3 +val: 3 1 2 +``` +第一次操作后: +``` +idx: 1 2 3 +val: 2 1 3 +``` +第二次操作后: +``` +idx: 1 2 3 +val: 1 2 3 +``` + +但是注意到这两次操作我们都没有利用到0,这是不符合要求的。那么该如何通过0来实现呢?其实只要开头增加一步,将之前已经归位的0与位置1上的元素再交换,即额外增加了一个没有归位的0: +``` +idx: 0 1 2 3 +val: 3 0 1 2 +``` +我们发现此时val-index的环上有四个元素了,同上的理论,我们只需要做三次index sort就能将这四个元素归位。加上之前额外的一次操作,总共是四次。 + +于是得出结论:如果某个非0的位置i上的数字不是i,并且通过k次交换(index sort)可以使得位置i上出现i的话,那么实际上借助0的话,我们需要k+2次交换(index sort)来实现。 + +综上:我们对每个位置i尝试index sort,假设k次操作后能够将数值i出现在位置i上,那么(1) 如果i是0,我们就将总操作数增加k;(2)如果i不是0,我们将总操作数增加k+2. + diff --git a/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level.cpp b/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level.cpp new file mode 100644 index 000000000..1708f0a3f --- /dev/null +++ b/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level.cpp @@ -0,0 +1,50 @@ +/** + * Definition for a binary tree node. 
+ * struct TreeNode { + * int val; + * TreeNode *left; + * TreeNode *right; + * TreeNode() : val(0), left(nullptr), right(nullptr) {} + * TreeNode(int x) : val(x), left(nullptr), right(nullptr) {} + * TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {} + * }; + */ +class Solution { + vectorlevel[100005]; + int maxDepth = 0; +public: + int minimumOperations(TreeNode* root) + { + dfs(root, 0); + int count = 0; + for (int t=0; t<=maxDepth; t++) + { + auto& nums = level[t]; + auto sorted = nums; + sort(sorted.begin(), sorted.end()); + unordered_maprank; + for (int i=0; ival); + dfs(node->left, depth+1); + dfs(node->right, depth+1); + } +}; diff --git a/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level/Readme.md b/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level/Readme.md new file mode 100644 index 000000000..62d16b8f3 --- /dev/null +++ b/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level/Readme.md @@ -0,0 +1,7 @@ +### 2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level + +将属于同一个level的数字都收集起来。然后用Indexing sort的方法去贪心地交换。 + +比如说,对于一个乱序的nums数组,我们可以提前知道每个数字的期望位置rank[nums[i]]。我们就从前往后查看每一个位置,如果当前的`rank[nums[i]]!=i`,那么就把nums[i]与位于rank[nums[i]]的数字交换。这样的交换可以持续多次,直至我们在i这个位置上迎来期望的数字。 + +为什么一定能够迎来期望的数字呢?因为每一次交换,我们都把一个数字送到了它应该在的位置。一旦把n-1个数字都放到了它们对应期望的位置,那么i这个位置的数字一定也已经安排到了期望的数字。 diff --git a/Greedy/2498.Frog-Jump-II/2498.Frog-Jump-II.cpp b/Greedy/2498.Frog-Jump-II/2498.Frog-Jump-II.cpp new file mode 100644 index 000000000..6f359b922 --- /dev/null +++ b/Greedy/2498.Frog-Jump-II/2498.Frog-Jump-II.cpp @@ -0,0 +1,15 @@ +class Solution { +public: + int maxJump(vector& stones) + { + int n = stones.size(); + + if (n==2) + return stones[1]; + + int ret = 0; + for (int i=0; i+2b->i->c。显然,我们应该让a处的青蛙先落地休息得到跨度a->i,而另一只青蛙则需要再跳至少b->c。相反,另外一种方案,让b处的青蛙先落地的话,那么a处的青蛙之后必然至少要跳跃一个更大的跨度a->c。显然这个方案是不及前者优秀的。所以得到一个结论,任何时候,都让离得更远的青蛙先落地。 + +这个结论的推论就非常有趣,该方案必然导致了两个青蛙轮流落地。所以,最优解法就是找全局最大的`stones[i+2]-stones[i]`. diff --git a/Greedy/2499.minimum-total-cost-to-make-arrays-unequal/2499.minimum-total-cost-to-make-arrays-unequal.cpp b/Greedy/2499.minimum-total-cost-to-make-arrays-unequal/2499.minimum-total-cost-to-make-arrays-unequal.cpp new file mode 100644 index 000000000..ac9f837af --- /dev/null +++ b/Greedy/2499.minimum-total-cost-to-make-arrays-unequal/2499.minimum-total-cost-to-make-arrays-unequal.cpp @@ -0,0 +1,47 @@ +using LL = long long; +class Solution { +public: + long long minimumTotalCost(vector& nums1, vector& nums2) + { + int n = nums1.size(); + unordered_mapcount; + int total = 0; + LL ret = 0; + for (int i=0; i(0,0) +2. (0,1)->(1,1) +3. (1,0)->(1,1) +4. (1,1)->(1,0) + +从中我们发现,只要s中存在一个1,它就可以将其他任何位置上的0->1或者1->0,可以进行任何想要的变化。变化之后,我们需要考虑s里的这个1本身是否需要调整。如果它不需要调整(即target对应的元素也是1),那么就ok。如果它需要调整(即target对应的元素是0),那么我们需要s里的其他已经变换过的位置上存在1来帮助我们调整,对应这意味着target上也必须存在着1. 
+ +所以,只有当s里面有1,且t里面也有一个1(可以与s里的那个1在同一个位置,对应第一种情况;也可以在不同的位置,对应第二种情况),那么就可以实现变化。反之,如果s里面都是0,或者t里面都是0的话,就无法实现变换。 diff --git a/Greedy/2551.Put-Marbles-in-Bags/2551.Put-Marbles-in-Bags.cpp b/Greedy/2551.Put-Marbles-in-Bags/2551.Put-Marbles-in-Bags.cpp new file mode 100644 index 000000000..a6d00c9c3 --- /dev/null +++ b/Greedy/2551.Put-Marbles-in-Bags/2551.Put-Marbles-in-Bags.cpp @@ -0,0 +1,24 @@ +using LL = long long; +class Solution { +public: + long long putMarbles(vector& weights, int k) + { + int n = weights.size(); + if (n==1) return 0; + + vectorarr; + for (int i=0; i& p, int k) + { + int n = p.size(); + if (p[n-1]-p[0] <= 2*k) return p.size(); + + vectorpre(n); + vectorpost(n); + + int i = 0; + int mx = 0; + for (int j=0; jk) + i++; + mx = max(mx, j-i+1); + pre[j] = mx; + } + + int j = n-1; + mx = 0; + for (int i=n-1; i>=0; i--) + { + while (p[j]-p[i]>k) + j--; + mx = max(mx, j-i+1); + post[i] = mx; + } + + int ret = 0; + for (int i=0; i+1k`时,说明`j-i`就是以i为左端点的、长度为k的区间的最大元素数目,我们记做left[i]。同理,我们也可以计算以j为右端点的、长度为k的区间的最大元素数目right[j]。 + +那么回到两个区间的问题。因为两段区间不重合,所以我们只要找一个分界点k,在k左边找一个最大数目区间(注意其右端点不一定就是在k),在k右边找一个最大数目区间(注意其左端点也不一定就是k),两者之和就是以k为分界点所能得到的两个区间。注意到,前者是right[j]的前缀Max,后者是left[i]的后缀Max。 diff --git a/Greedy/2561.Rearranging-Fruits/2561.Rearranging-Fruits.cpp b/Greedy/2561.Rearranging-Fruits/2561.Rearranging-Fruits.cpp new file mode 100644 index 000000000..0fadebbb7 --- /dev/null +++ b/Greedy/2561.Rearranging-Fruits/2561.Rearranging-Fruits.cpp @@ -0,0 +1,38 @@ +class Solution { +public: + long long minCost(vector& basket1, vector& basket2) + { + mapMap; + for (int x: basket1) + Map[x]++; + for (int x: basket2) + Map[x]--; + + int t = Map.begin()->first; + + vectora; + for (auto [k,v]: Map) + { + if (v%2!=0) return -1; + if (v>0) + { + for (int i=0; ileft(n, m); + int j = 0; + for (int i=0; iright(n, -1); + j = m-1; + for (int i=n-1; i>=0; i--) + { + while (j>=0 && s[j]!=t[i]) + j--; + if (j>=0) + { + right[i] = j; + j--; + } + } + + int low = 0, high = n; + while (low < high) + { + int mid = low+(high-low)/2; + if (isOK(mid, s, t, left, right)) + high = mid; + else + low = mid+1; + } + + return low; + } + + bool isOK(int len, string& s, string& t, vector&left, vector&right) + { + int m = s.size(); + int n = t.size(); + + if (right[len]>=0) return true; + if (left[n-len-1] < m) return true; + + for (int i=1; i+len& nums) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + return min({nums[n-2]-nums[1], nums[n-1]-nums[2], nums[n-3]-nums[0]}); + } +}; diff --git a/Greedy/2567.Minimum-Score-by-Changing-Two-Elements/Readme.md b/Greedy/2567.Minimum-Score-by-Changing-Two-Elements/Readme.md new file mode 100644 index 000000000..f09858e8f --- /dev/null +++ b/Greedy/2567.Minimum-Score-by-Changing-Two-Elements/Readme.md @@ -0,0 +1,7 @@ +### 2567.Minimum-Score-by-Changing-Two-Elements + +设想,如果贪心地将两个修改的名额都用来降低 high score,我们可以有三种方法:(1) 将最大值改为次大值,最小值改为次小值,这样high score就是`nums[n-2]-nums[1]`. (2) 将最小的两个值都改为第三小的值,这样high score就是`nums[n-1]-nums[2]`. (2) 将最大的两个值都改为第三大的值,这样high score就是`nums[n-3]-nums[0]`. 
+ +此时我们发现,我们选取上述的哪个方案,因为出现了重复元素,所以low score都是零。 + +所以答案就是在三个high score里面挑最小值即可。 diff --git a/Greedy/2568.Minimum-Impossible-OR/2568.Minimum-Impossible-OR_v1.cpp b/Greedy/2568.Minimum-Impossible-OR/2568.Minimum-Impossible-OR_v1.cpp new file mode 100644 index 000000000..6776497aa --- /dev/null +++ b/Greedy/2568.Minimum-Impossible-OR/2568.Minimum-Impossible-OR_v1.cpp @@ -0,0 +1,17 @@ +class Solution { +public: + int minImpossibleOR(vector& nums) + { + sort(nums.begin(), nums.end()); + int mx = 0; + for (int i=0; i mx+1) + return mx+1; + else + mx = (mx | nums[i]); + } + + return mx+1; + } +}; diff --git a/Greedy/2568.Minimum-Impossible-OR/2568.Minimum-Impossible-OR_v2.cpp b/Greedy/2568.Minimum-Impossible-OR/2568.Minimum-Impossible-OR_v2.cpp new file mode 100644 index 000000000..e7c6c54c3 --- /dev/null +++ b/Greedy/2568.Minimum-Impossible-OR/2568.Minimum-Impossible-OR_v2.cpp @@ -0,0 +1,13 @@ +class Solution { +public: + int minImpossibleOR(vector& nums) + { + unordered_setSet(nums.begin(), nums.end()); + for (int i=0; i<31; i++) + { + if (Set.find(1<mx+1`,那么我们如论如何都无法构造出mx+1来。 + +同理,本题里的或运算和加法运算有着相同的性质:越搞数越大。假设前i-1个元素里,我们能构造连续的自然数[1,mx],那么如果`nums[i]<=mx+1`,那么意味着前i个元素里,我们可以任意构造[1,mx|num[i]]里的元素。反之,如果`nums[i]>mx+1`,那么将nums[i]与任何[1,mx]里面的元素进行操作,得到的都会比nums[i]还大,我们如论也无法构造出mx+1来。 + +#### 解法2 +假设我们能够构造出2^0,2^1,..,2^k,那么意味着[1,2^(k+1)-1]里面的任何元素都能构造出来。但是我们肯定无法构造出2^(k+1),所以我们只需要查看2^(k+1)是否在数组里即可。如果在的话,那么递归处理,我们只需要查看`2^(k+2)`是否在数组里即可。即本题求的就是最小的、不在数组里的2的幂。 diff --git a/Greedy/2571.Minimum-Operations-to-Reduce-an-Integer-to-0/2571.Minimum-Operations-to-Reduce-an-Integer-to-0.cpp b/Greedy/2571.Minimum-Operations-to-Reduce-an-Integer-to-0/2571.Minimum-Operations-to-Reduce-an-Integer-to-0.cpp new file mode 100644 index 000000000..c57e673de --- /dev/null +++ b/Greedy/2571.Minimum-Operations-to-Reduce-an-Integer-to-0/2571.Minimum-Operations-to-Reduce-an-Integer-to-0.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int minOperations(int n) + { + int ret =0 ; + for (int i=0; i<31; i++) + { + if (count(n+(1<>& lcp) + { + int n = lcp.size(); + string s(n, '0'); + + int i = 0; + for (char ch = 'a'; ch<='z'; ch++) + { + while (i>dp(n, vector(n,0)); + for (int i=n-1; i>=0; i--) + for (int j=n-1; j>=0; j--) + { + if (s[i]==s[j]) + dp[i][j] = (i==n-1 || j==n-1)? 
1: (dp[i+1][j+1] + 1); + if (dp[i][j] != lcp[i][j]) + return ""; + } + + return s; + } +}; diff --git a/Greedy/2573.Find-the-String-with-LCP/Readme.md b/Greedy/2573.Find-the-String-with-LCP/Readme.md new file mode 100644 index 000000000..81a5db7cd --- /dev/null +++ b/Greedy/2573.Find-the-String-with-LCP/Readme.md @@ -0,0 +1,11 @@ +### 2573.Find-the-String-with-LCP + +首先,我们知道,如果lcp[i][j]>=0,那么一定意味着s[i]==s[j],这意味着我们知道任意两个字符是否相等的信息。假设LCP的信息是准确的,那么仅凭这些信息我们就可以充分地构造出字符串来。 + +我们先考察s中第一个未填写的位置i,此时必然是s[0]。由于我们只有字符相等的约束,而没有字符大小的约束,所以为了构造字典序最小的字符串,我们必然将s[0]填写为'a'。此时我们只需要考察所有`lcp[0][j]>0`的位置j,那么必然有s[j]=s[0]='a'。当然如果我们发现s[j]已经被填写过了且不是'a',那么说明引出了矛盾,可以直接返回无解。 + +接下来我们考察s中第二个未填写的字符i。注意这个位置可能不一定是s[1],因为s[1]可能已经由于一些相等关系的约束而已经填写了。同样,为了使得字典序最小,我们必然将s[i]置为'b'。此时我们只需要考察所有`lcp[i][j]>0`的位置j,那么必然有s[j]=s[i]='b'。当然如果我们发现s[j]已经被填写过了且不是'b',那么说明引出了矛盾,可以直接返回无解。 + +依次类推,我们可以将26个字母顺次填入s未填充的位置上。如果最后还有一些位置没有填充完,说明无法用26个英文字符完成任务。 + +以上得到的s是基于LCP可信赖的前提。但是LCP本身可能是有问题的,比如`lcp[0][2]=3`但是`lcp[1][3]=4`,这样的信息是矛盾的。所以我们还需要检验基于s的LCP矩阵的准确性。求任意两个位置的LCP,这本质就是求一个双序列的DP,用两层循环即可实现。 diff --git a/Greedy/2576.Find-the-Maximum-Number-of-Marked-Indices/2576.Find-the-Maximum-Number-of-Marked-Indices.cpp b/Greedy/2576.Find-the-Maximum-Number-of-Marked-Indices/2576.Find-the-Maximum-Number-of-Marked-Indices.cpp new file mode 100644 index 000000000..f18f83e3d --- /dev/null +++ b/Greedy/2576.Find-the-Maximum-Number-of-Marked-Indices/2576.Find-the-Maximum-Number-of-Marked-Indices.cpp @@ -0,0 +1,23 @@ +class Solution { +public: + int maxNumOfMarkedIndices(vector& nums) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + + int i = 0, j = n/2; + int count = 0; + for (int i=0; inums[j]) + j++; + if (j=2a`的元素b与之配对。目的是尽量保留更大的元素可以用来匹配次小的元素。然后重复这样的过程。 + +但是这种策略会遇到这样的一个例子:`[2,4,5,9]`。当2与4匹配之后,此时未被匹配的次小元素是5,反而变得更大了。 + +正确的思考方式是:如果总元素是n,那么最多能匹配n/2对。为了更多地凑出这些pairs,每个pair的较小值必然在排序后的nums的前半部分,而较大值在nums的后半部分。假设有一对[a,b]都在前半部分,另一对[c,d]都在后半部分,那么必然可以重构出两对[a,c],[b,d]同样更容易条件。同理,可以证明出其他情况下,任何处于同一个半区的配对都不会是最优解。 + +所以本题只需要使用双指针,第一个指针i在前半区遍历,第二个指针j在后半区遍历,对于每个i单调移动j看看是否能有配对即可。 + +本题在codeforces上的原题是:https://codeforces.com/contest/372/problem/A + diff --git a/Greedy/2580.Count-Ways-to-Group-Overlapping-Ranges/2580.Count-Ways-to-Group-Overlapping-Ranges.cpp b/Greedy/2580.Count-Ways-to-Group-Overlapping-Ranges/2580.Count-Ways-to-Group-Overlapping-Ranges.cpp new file mode 100644 index 000000000..f37b46d2b --- /dev/null +++ b/Greedy/2580.Count-Ways-to-Group-Overlapping-Ranges/2580.Count-Ways-to-Group-Overlapping-Ranges.cpp @@ -0,0 +1,26 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int countWays(vector>& ranges) + { + sort(ranges.begin(), ranges.end()); + int n = ranges.size(); + LL ret = 1; + + for (int i=0; i>& tasks) + { + sort(tasks.begin(), tasks.end(), [](vector&a, vector&b){ + return a[1] < b[1]; + }); + + vectortime(2005); + for (int i=0; i= duration) continue; + int diff = duration - overlap; + for (int t=end; t>=start; t--) + { + if (time[t]==0) + { + time[t] = 1; + diff--; + } + if (diff == 0) + break; + } + } + + int ret = 0; + for (int t=0; t<=2000; t++) + ret += (time[t]==1); + return ret; + } +}; diff --git a/Greedy/2589.Minimum-Time-to-Complete-All-Tasks/2589.Minimum-Time-to-Complete-All-Tasks_v2.cpp b/Greedy/2589.Minimum-Time-to-Complete-All-Tasks/2589.Minimum-Time-to-Complete-All-Tasks_v2.cpp new file mode 100644 index 000000000..58e273662 --- /dev/null +++ b/Greedy/2589.Minimum-Time-to-Complete-All-Tasks/2589.Minimum-Time-to-Complete-All-Tasks_v2.cpp @@ -0,0 +1,46 @@ +using AI3 = array; +class Solution { +public: + int 
findMinimumTime(vector>& tasks) + { + sort(tasks.begin(), tasks.end(), [](vector&a, vector&b){ + return a[1] < b[1]; + }); + + vectorarr; + arr.push_back({-2,-1,0}); + + for (int i=0; i 0) + { + if (abs(arr.back()[1] - cur) < diff) + { + diff -= abs(arr.back()[1] - cur); + cur = arr.back()[0] - 1; + arr.pop_back(); + } + else + { + arr.push_back({cur-diff+1, end, arr.back()[2] + end-(cur-diff)}); + diff = 0; + } + } + } + + return arr.back()[2]; + } +}; diff --git a/Greedy/2589.Minimum-Time-to-Complete-All-Tasks/Readme.md b/Greedy/2589.Minimum-Time-to-Complete-All-Tasks/Readme.md new file mode 100644 index 000000000..6674d3426 --- /dev/null +++ b/Greedy/2589.Minimum-Time-to-Complete-All-Tasks/Readme.md @@ -0,0 +1,22 @@ +### 2589.Minimum-Time-to-Complete-All-Tasks + +#### 解法1: +我们将所有任务按照end排序。这是因为end早的任务我们必然先考虑,其它没有到deadline的任务都可以放一放。对于第一个任务,我们必然会尽量拖延它的启动时间,即实际的运作时段是`[end-duration+1, end]`,这样就可以与后面的任务有更多的重合时间。对于第二个任务,必然会充分利用它与第一个任务实际工作的重合部分,假设已经重合的时间不够完成第二个任务,那么我们会在什么时段继续工作呢?其实也是同理,就是卡在第二个任务的deadline之前完成,目的也是为了尽量拖延,增加与后面的任务重合的概率。依此贪心的策略,就可以处理所有的任务。 + +因为本题的数据量不多,任务的个数`n <= 2000`,另外整体的时间跨度也不大`1 <= starti, endi <= 2000`,所以本题可以通过在时间轴上的遍历来暴力解决。比如对于某个任务[start,end,duration],我们先看时间轴上哪些时刻是标记为开工的,与它的重合部分有多长,再与duration比较还得需要多长时间diff才能完成。如果T大于零,那么我们就从end开始往前遍历,将没有在工作的时刻标记为开工,直至把diff都消耗完。 + +这样的时间复杂度是`O(N*T)`。 + +#### 解法2: +上述算法的时间复杂度其实可以不依赖于总时间跨度T。可以想象,如果每个任务的时间跨度都很大,那么遍历时间轴的效率是很低的。上述的算法可以用o(nlogn)来实现。 + +在上述算法里,我们依次处理每个任务的时候,其实都会在时间轴上确定下一段段的实际开工时间,它们是一系列互不重叠的区间。所以我们用arr来盛装这些区间,对于每个区间我们用[a,b,totalTime]表示,a表示起点、b表示终点(都是双闭区间),totalTime表示该区间结束时整个时间轴上已经开工时间的总和,相当于arr里开工区间的长度的前缀和。 + +对于一个新任务[start,end,duration],我们先要计算新区间与已经开工的这些区间有多少重合度`overlap`。因为有了前缀和的信息,所以这个计算是可行的。我们只需要用二分法,定位start在arr里的位置,找到最后一个早于start的区间interval。如果此interval与新任务完全不重合,那么新任务与已开工的重合度`overlap`就是interval右边的所有开工区间的长度,这可以用两个前缀和之差得到。如果此interval与新任务有重合部分,那么`overlap`就是前一种情况的计算结果,再加上此区间与[start,end]的重合部分。 + +我们知道了`duration`和`overlap`,就可以知道我们还需要从end开始往前填充若干个开工区间之间的间隔,以满足额外的开工长度`diff`,显然这会导致arr最后的几个开工区间合并起来。所以我们就暴力从后往前枚举每个区间[a,b],思路如下: +1. 如果融合了该区间,那么我们新增了多长的开工时间(新增的开工时间其实是该区间与下一个区间之间的间隔长度) +2. 如果新增的开工时间大于diff,那么说明该区间不用被重合,我们只需要计算该区间右端点后的某个位置x,将[x,end]加入arr即可。 +3. 如果新增的开工时间小于diff,那么说明该区间需要被融合,我们将arr的最后一个区间重置为[a,end]. 
更新diff后开启下一个循环。 + +最终的总开工时间就是arr里最后一个区间对应的前缀和。 diff --git a/Greedy/2598.Smallest-Missing-Non-negative-Integer-After-Operations/2598.Smallest-Missing-Non-negative-Integer-After-Operations.cpp b/Greedy/2598.Smallest-Missing-Non-negative-Integer-After-Operations/2598.Smallest-Missing-Non-negative-Integer-After-Operations.cpp new file mode 100644 index 000000000..18ee10bd5 --- /dev/null +++ b/Greedy/2598.Smallest-Missing-Non-negative-Integer-After-Operations/2598.Smallest-Missing-Non-negative-Integer-After-Operations.cpp @@ -0,0 +1,27 @@ +class Solution { +public: + int findSmallestInteger(vector& nums, int value) + { + vectorcount(value); + + for (int& x: nums) + { + x = ((x%value)+value) % value; + count[x] += 1; + } + + int min_count = INT_MAX; + int k; + + for (int i=0; i=0; i--) + { + for (char ch=s[i]+1; ch<'a'+k; ch++) + { + if (!checkOK(s, i, ch)) continue; + s[i] = ch; + + for (int j=i+1; j=1 && s[i-1]==ch) return false; + if (i>=2 && s[i-2]==ch) return false; + return true; + } +}; diff --git a/Greedy/2663.Lexicographically-Smallest-Beautiful-String/Readme.md b/Greedy/2663.Lexicographically-Smallest-Beautiful-String/Readme.md new file mode 100644 index 000000000..d03848765 --- /dev/null +++ b/Greedy/2663.Lexicographically-Smallest-Beautiful-String/Readme.md @@ -0,0 +1,7 @@ +### 2663.Lexicographically-Smallest-Beautiful-String + +本题的关键就是如何解读“不能出现回文子串”。其实这个约束可以简化为“没有任何两个相邻的字符相同”,且“没有任何长度为3的子串里第一个和第三个字符相同”。 + +然后我们就可以贪心地从低位往高位遍历,查看某位置i上能否填写一个比原先更大的字符,且满足上述的约束。如果可以,那么我们必然会尝试贪心地将[i+1:n-1]这一段构造为字典序最小、且符合约束的字符串。事实上,我们总是能构造成功的,因为在任何的位置上,我们只有两个约束(不能与前一个字符相同,不能与前前字符相同),但是`k>=4`,我们至少可以有四种候选。故这样的贪心构造法必然能实现,且保证字典序最小。 + +因此,只要我们从低位往高位遍历,找到第一个实现上述构造(即s[i]和s[i+1:n-1]都满足条件)的位置,那么就有了最终答案。 diff --git a/Greedy/2712.Minimum-Cost-to-Make-All-Characters-Equal/2712.Minimum-Cost-to-Make-All-Characters-Equal_v1.cpp b/Greedy/2712.Minimum-Cost-to-Make-All-Characters-Equal/2712.Minimum-Cost-to-Make-All-Characters-Equal_v1.cpp new file mode 100644 index 000000000..9a47b6cfb --- /dev/null +++ b/Greedy/2712.Minimum-Cost-to-Make-All-Characters-Equal/2712.Minimum-Cost-to-Make-All-Characters-Equal_v1.cpp @@ -0,0 +1,15 @@ +using LL = long long; +class Solution { +public: + long long minimumCost(string s) + { + int n = s.size(); + long long ret = 0; + for (int i=1; ileft(n); + int lastOne = -1; + LL sum = 0; + for (int i=0; i=1 && s[i-1]=='1') + sum = sum+1; + else + sum += (i+1) + i; + + left[i] = sum; + lastOne = i; + } + + vectorright(n); + lastOne = n; + sum = 0; + for (int i=n-1; i>=0; i--) + { + if (s[i]=='0') + { + right[i] = sum; + continue; + } + + if (i+1 goodSubsetofBinaryMatrix(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + unordered_map>Map; + for (int i=0; i>j)&1)) + { + flag = 0; + break; + } + } + if (flag==0) continue; + if (Map[s].size()==0) continue; + + for (int k: Map[s]) + { + if (k!=i) + { + vectorrets({i,k}); + sort(rets.begin(), rets.end()); + return rets; + } + } + } + } + + return {}; + } +}; diff --git a/Greedy/2732.Find-a-Good-Subset-of-the-Matrix/Readme.md b/Greedy/2732.Find-a-Good-Subset-of-the-Matrix/Readme.md new file mode 100644 index 000000000..daa971062 --- /dev/null +++ b/Greedy/2732.Find-a-Good-Subset-of-the-Matrix/Readme.md @@ -0,0 +1,13 @@ +### 2732.Find-a-Good-Subset-of-the-Matrix + +我们将每行用一个最多含5 bit的二进制数编码来表示它的每个列位置是0还是1. 为了增大复杂度,我们令列数是5. + +首先,我们考虑两种特殊情况。如果有一行的编码是0,那么它自身组成的集合就符合条件。另外,如果有两行的编码的“交集”为零,那么这两行组成的集合也符合条件。 + +接下来考虑,如果任何两行的state的交集都不为0,那么会出现什么情况。 + +我们可以知道,想要有解,至少存在一行,最多含有两个bit 1. 
理由是,如果所有的行都存在三个或以上的bit 1,那么无论选取哪些k行,总的bit 1的数就是大于等于3k,但是根据题意“任何一列的bit 1的数目不能超过行数的一半”,即总的bit 1的数目不能超过`0.5k*5=2.5k`,从而产生矛盾。不失一般性地,我们可以令某一行的编码是b00011。 + +回到之前的前提,“如果任何两行的编码的交集都不为0”,那么其他选取的k-1行里,在第0和1的位置上至少有一个bit 1。于是总体的这k行里,就有了至少k+1个bit 1。这就说明了在第0和1的位置上,不可能有任何一列的bit 1的个数少于等于`floor(k/2)`。得到矛盾,因此“任何两行的编码交集都不为0”情况下,是不可能有解的。 + +综上,我们只需要考察之前所述的两种特殊情况即可找出解,或者判定无解。对于第二种特殊情况,我们建立`编码->行号`的映射,就可以知道对于行A而言,是否存在与之符合条件的行B了。 diff --git a/Greedy/2745.Construct-the-Longest-New-String/2745.Construct-the-Longest-New-String.cpp b/Greedy/2745.Construct-the-Longest-New-String/2745.Construct-the-Longest-New-String.cpp new file mode 100644 index 000000000..9a3d1bfde --- /dev/null +++ b/Greedy/2745.Construct-the-Longest-New-String/2745.Construct-the-Longest-New-String.cpp @@ -0,0 +1,8 @@ +class Solution { +public: + int longestString(int x, int y, int z) + { + int t = x+y+z-max(0, (max(x,y)-min(x,y)-1)); + return t*2; + } +}; diff --git a/Greedy/2745.Construct-the-Longest-New-String/Readme.md b/Greedy/2745.Construct-the-Longest-New-String/Readme.md new file mode 100644 index 000000000..330d99ac9 --- /dev/null +++ b/Greedy/2745.Construct-the-Longest-New-String/Readme.md @@ -0,0 +1,7 @@ +### 2745.Construct-the-Longest-New-String + +当我们仅考虑AA和BB时,我们可以将其交替串联,如BBAABBAA...,注意最后可以AA或BB结尾,使用两种片段的个数最多差1。这样能使用到的片段个数是 `min(x,y)*2 + min(abs(x-y),1)`. + +然后考虑所有的AB,只需将其插入任何BB与AA之间即可,不影响之前的构造。 + +所以最终能使用到的片段个数是 `min(x,y)*2 + min(abs(x-y),1) +z`. diff --git a/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/2749.Minimum-Operations-to-Make-the-Integer-Zero.cpp b/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/2749.Minimum-Operations-to-Make-the-Integer-Zero.cpp new file mode 100644 index 000000000..8bbf04ca8 --- /dev/null +++ b/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/2749.Minimum-Operations-to-Make-the-Integer-Zero.cpp @@ -0,0 +1,20 @@ +using LL = long long; +class Solution { +public: + int makeTheIntegerZero(int num1, int num2) + { + long long x = num1; + long long y = num2; + int k = 1; + while (1) + { + x -= y; + if (x < k) return -1; + + int count = __builtin_popcountll(x); + if (count <= k) return k; + k++; + } + return -1; + } +}; diff --git a/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/2749.Minimun-Operations-to-make-the-integer-Zero-PYTHON b/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/2749.Minimun-Operations-to-make-the-integer-Zero-PYTHON new file mode 100644 index 000000000..820e251bf --- /dev/null +++ b/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/2749.Minimun-Operations-to-make-the-integer-Zero-PYTHON @@ -0,0 +1,17 @@ +class Solution(object): + def makeTheIntegerZero(self,num1,num2): + x = num1 + y = num2 + k = 1 + + while True: + x = x - y + if x < k: + return -1 + + if bin(x).count('1') <= k: + return k + + k = k + 1 + +# I hope this can help anyone whos resolving this huge problem on python ;) diff --git a/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/Readme.md b/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/Readme.md new file mode 100644 index 000000000..0a59bc74b --- /dev/null +++ b/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero/Readme.md @@ -0,0 +1,5 @@ +### 2749.Minimum-Operations-to-Make-the-Integer-Zero + +本题就是寻找最小的操作次数k,使得`num1-k*num2`可以表示为k个`2^i`相加的形式,标记为(*)。 + +我们观察k个`2^i`相加,它有最小值就是k。所以如果`num1-k*num2 doors); + * void closeDoor(); + * bool isDoorOpen(); + * void moveRight(); + * }; + */ +class Solution { +public: + int houseCount(Street* street, int k) + { + while (!street->isDoorOpen()) + street->moveRight(); + 
street->moveRight(); + + int step = 1; + int lastOpen = 0; + for (int i=0; iisDoorOpen()) + { + lastOpen = step; + street->closeDoor(); + } + step++; + street->moveRight(); + } + return lastOpen; + } +}; diff --git a/Greedy/2753.Count-Houses-in-a-Circular-Street-II/Readme.md b/Greedy/2753.Count-Houses-in-a-Circular-Street-II/Readme.md new file mode 100644 index 000000000..47e7a2e56 --- /dev/null +++ b/Greedy/2753.Count-Houses-in-a-Circular-Street-II/Readme.md @@ -0,0 +1,5 @@ +### 2753.Count-Houses-in-a-Circular-Street-II + +我们先找到一处状态为open的门。然后从下一个位置作为起点,连续走k格,图中如果遇到任何open的门就将其关闭,但同时记录并保持更新lastOpen相对于起点的距离。 + +走完k格之后,lastOpen一定就是起点之前的那扇门,于是lastOpen相对于起点的距离就是整圈的长度。 diff --git a/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence/2813.Maximum-Elegance-of-a-K-Length-Subsequence.cpp b/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence/2813.Maximum-Elegance-of-a-K-Length-Subsequence.cpp new file mode 100644 index 000000000..76d6f2be6 --- /dev/null +++ b/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence/2813.Maximum-Elegance-of-a-K-Length-Subsequence.cpp @@ -0,0 +1,49 @@ +using LL = long long; +using PII = pair; +class Solution { +public: + long long findMaximumElegance(vector>& items, int k) + { + sort(items.rbegin(), items.rend()); + + LL sum = 0; + unordered_mapMap; + for (int i=0; i, greater<>>pq; + for (int i=0; i 1) + { + sum -= profit; + sum += items[i][0]; + t++; + Map[cate]--; + Map[items[i][1]]++; + + ret = max(ret, sum + t*t); + break; + } + } + } + + return ret; + } +}; diff --git a/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence/Readme.md b/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence/Readme.md new file mode 100644 index 000000000..12052adb1 --- /dev/null +++ b/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence/Readme.md @@ -0,0 +1,13 @@ +### 2813.Maximum-Elegance-of-a-K-Length-Subsequence + +一个显然的想法是,能否遍历种类的数目:在固定种类数目的情况下,贪心地选择对应profit最高的k个item。但是即使说我们只考虑t个category,但是这样的t-distinct的category组合也非常多,我们无法穷举。 + +我们继续考虑。如果将所有元素按照profit降序排列,粗暴地取前k个元素,并记此时有t种不同的category,那么我们至少可以claim,当强制选择t个category时,此时的收益一定是最高的。因为我们选取的项目本身就是profit的top K. + +然后我们想,强制选择小于t个category的话,该如何规划呢?本题的突破口就在这里。我们知道,相比于上述`choose profit top K`的决策,其他任何决策都不会在`total_profit`更优;并且如果打算选择的category个数还更小的话,`distinct_categories^2`也不会占优势。故总的elegance肯定不及上面的方案。所以我们可以终止这个方向的探索。 + +然后我们想,强制选择多余t个category的话,该如何规划呢?既然top K个item已经包含了t个category,我们必然会贪心地按照profit的降序考察后续的项目,直至找到一个属于新种类的item,这样就有了t+1个category.注意,此时我们为了保持item总数为k,必然要吐出一个item:这个item必然是profit尽量小,同时它对应的category必须还存在其他的元素(否则将其吐出之后总的category数目就又不够t+1了)。所以我们的做法是将之前的top k item都放入一个小顶堆的PQ,需要弹出时查看当前profit最小的item是否是“单身”,如果是的话就忽略,如果否的话就可以将其“吐出”而将属于新category的item加入。这样我们就得到了t+1个category时的profit top k. 
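+
+下面用一组假设的数值演示这个“吐出/换入”的步骤(数字仅为示意,并非题目样例):
+```
+items = [[9,1],[8,1],[7,2],[5,3]], k = 3
+按profit取top-3:{9(cat1), 8(cat1), 7(cat2)},t=2,elegance = 24 + 2^2 = 28
+升到t=3:堆顶的7(cat2)是“单身”,忽略;弹出8(cat1),换入新种类的5(cat3)
+此时 sum = 9+7+5 = 21,elegance = 21 + 3^2 = 30,优于28
+```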
+ +依次类推,我们可以得到t+2个category时的profit top k,以及t+3个category时的profit top k等等。最终在所有category数目对应的最大elegance里挑选最大值。 + +但是注意,在贪心的过程中,如果我们无法找到一个可以吐出的item时,意味着我们无法构造“t+1个category时的profit top k”,因为这时已经发生了`t+1>k`。 diff --git a/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum.cpp b/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum.cpp new file mode 100644 index 000000000..524315fb7 --- /dev/null +++ b/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum.cpp @@ -0,0 +1,51 @@ +class Solution { +public: + int minOperations(vector& nums, int target) + { + vectorcount(31, 0); + for (int x: nums) + { + int i = 0; + while (x>0) + { + x/=2; + i++; + } + count[i-1] += 1; + } + + vectort; + for (int i=0; i<31; i++) + { + if ((target>>i)&1) + t.push_back(i); + } + + int ret = 0; + for (int i: t) + { + int j = 0; + while (j0) + { + count[i] -= 1; + continue; + } + + while (j<31 && count[j]==0) + j++; + if (j==31) return -1; + count[j] -= 1; + for (int k=j-1; k>=i; k--) + count[k]+=1; + ret += j-i; + } + + return ret; + } +}; diff --git a/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum/Readme.md b/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum/Readme.md new file mode 100644 index 000000000..d2f2b9364 --- /dev/null +++ b/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum/Readme.md @@ -0,0 +1,19 @@ +### 2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum + +显然我们会将nums里的元素做二进制分解,每个二进制位上会有若干个1,我们将其记录在count数组里。count[i]表示第i个bit位上我们有多少个1. + +同理,我们会将target做二进制分解,每个bit位上的1表示我们需要从count里得到的“支持”。比如说,如果target上的每个需要1的二进制位i上,count[i]都大于零的话,那么意味着nums已经可以拼凑出target了。 + +我们从低到高逐个考虑target所需要的二进制位. 假设我们需要第i个bit位上的1,那么我们该如何考察count能否支持呢? + +1. 首先我们考虑比i低的二进制位上,count是否能够通过现有的低位上的“1”的sum来实现第i位上的1(注意,因为nums里每个元素只有一个bit 1,所以低位上1的sum必然对应着nums里某些元素的sum). 我们可以将所有低位的1都加起来,通过逐次进位的形式,看看能否传播到第i位。比如说count[0]=5,i=2, 那么我们可以对count做如下变化 +``` +step 1: count[0]=1, count[1]=2 +step 2: count[0]=1, count[1]=0, count[2]=1 +``` + +基本思想就是:能进位则进位。最终每个count[]上不是0就是1. 上面的例子里,count[0]=5 确实可以给target的第i位提供1的支持。 + +2. 
其次,如果以上方法不能实现,那么就意味着我们需要将高位上的1进行“拆解”以满足第i位上的1。显然,我们会贪心地在count里找到最接近i且count>0的位置j,将其拆解j-i次,就可以将第j位上的1传播到j-1,j-2,...i各个位上。这样我们就满足了taget在第i位上的需求。 + +通过以上方法,就是实现本题的最优方案。 diff --git a/Greedy/2856.Minimum-Array-Length-After-Pair-Removals/2856.Minimum-Array-Length-After-Pair-Removals.cpp b/Greedy/2856.Minimum-Array-Length-After-Pair-Removals/2856.Minimum-Array-Length-After-Pair-Removals.cpp new file mode 100644 index 000000000..facb827a9 --- /dev/null +++ b/Greedy/2856.Minimum-Array-Length-After-Pair-Removals/2856.Minimum-Array-Length-After-Pair-Removals.cpp @@ -0,0 +1,22 @@ +class Solution { +public: + int minLengthAfterRemovals(vector& nums) + { + int n = nums.size(); + unordered_mapMap; + for (int i=0; i n/2) + return n - (n-mx)*2; + else + return (n%2); + + } +}; diff --git a/Greedy/2856.Minimum-Array-Length-After-Pair-Removals/Readme.md b/Greedy/2856.Minimum-Array-Length-After-Pair-Removals/Readme.md new file mode 100644 index 000000000..109179d8a --- /dev/null +++ b/Greedy/2856.Minimum-Array-Length-After-Pair-Removals/Readme.md @@ -0,0 +1,7 @@ +### 2856.Minimum-Array-Length-After-Pair-Removals + +本题的本质就是Boyer-Moore Majority Voting Algorithm的实现。当存在一个超过半数的majority时,显然其他所有元素“联合”起来不能使它“消除”。反过来的结论也是成立的。 + +所以,当存在一个超过半数的majority时,记它的频次是f。那么剩余元素的频次是n-f。每个其他元素消灭一个多数元素,剩下的就是`n-(n-f)*2`. + +当不存在超过半数的majority时,理论上是能够最终彼此消灭的,但是别忘了n的奇偶性。当n是奇数时一定会有一个元素留下来。 diff --git a/Greedy/2868.The-Wording-Game/2868.The-Wording-Game.cpp b/Greedy/2868.The-Wording-Game/2868.The-Wording-Game.cpp new file mode 100644 index 000000000..ba81b73ce --- /dev/null +++ b/Greedy/2868.The-Wording-Game/2868.The-Wording-Game.cpp @@ -0,0 +1,23 @@ +class Solution { +public: + bool canAliceWin(vector& a, vector& b) + { + int m = a.size(), n = b.size(); + int i = 0, j = 0; + + while (1) + { + while (j a[i][0]+1) + return true; + + while (i b[j][0]+1) + return false; + } + + return false; + } +}; diff --git a/Greedy/2868.The-Wording-Game/Readme.md b/Greedy/2868.The-Wording-Game/Readme.md new file mode 100644 index 000000000..186f3fe66 --- /dev/null +++ b/Greedy/2868.The-Wording-Game/Readme.md @@ -0,0 +1,5 @@ +### 2868.The-Wording-Game + +本题看上去是很复杂的决策,但本质上就是简单的贪心。类似于打扑克,自己尽量出能恰好压过对手的牌,保留更大的牌后发制人,使得自己可以支撑更多的回合。 + +注意,本题里集合a与b之间都没有任何相同的字符串。这说明不必顾虑“对手出了某张牌导致自己有相同的牌无法打出”的情况。谁手里的牌大、牌多就是能赢的硬道理。 diff --git a/Greedy/2871.Split-Array-Into-Maximum-Number-of-Subarrays/2871.Split-Array-Into-Maximum-Number-of-Subarrays.cpp b/Greedy/2871.Split-Array-Into-Maximum-Number-of-Subarrays/2871.Split-Array-Into-Maximum-Number-of-Subarrays.cpp new file mode 100644 index 000000000..bb271a326 --- /dev/null +++ b/Greedy/2871.Split-Array-Into-Maximum-Number-of-Subarrays/2871.Split-Array-Into-Maximum-Number-of-Subarrays.cpp @@ -0,0 +1,23 @@ +class Solution { +public: + int maxSubarrays(vector& nums) + { + int n = nums.size(); + int ret = 0; + for (int i=0; i& nums, int k) + { + vectorcount(32); + + for (int x: nums) + { + for (int i=0; i<32; i++) + { + if ((x>>i)&1) + count[i] += 1; + } + } + + LL ret = 0; + + for (int t=0; t=0; i--) + { + if (count[i]>0) + { + x += (1LL< 1 1 +1, 0 => 0 1 +0, 1 => 0 1 +0, 0 => 0 0 +``` +我们发现OR的效果其实是在每个bit位上“收集”1,而AND的效果其实就是“送出”1. 一进一出,不难发现`X+Y= AND+OR`。也就是说每次操作X和Y的一对数,我们在“零和”的前提下,拉大了“贫富差距”。这是我们想要的吗?是的,因为这能增大平方和。简单的证明,当`x>=y`且`d>0`时 +``` +(x+d)^2 + (y-d)^2 = x^2 + y^2 + 2d*(x-y) + 2d^2 > x^2 + y^2 +``` + +因为可以无限次操作,所以可以任意从某个元素出发,通过不断OR来“吸取”其他元素里各bit位上的1,直至构造出尽量大的元素。 + +代码中,我们统计每个bit上,nums里总共提供多少个1. 构造大数时,只需从最高位到最低位尽量填充1即可,如果没有库存了,就填充0. 
最终取前k个大数,算一下平方和即可。 diff --git a/Greedy/2983.Palindrome-Rearrangement-Queries/2983.Palindrome-Rearrangement-Queries.cpp b/Greedy/2983.Palindrome-Rearrangement-Queries/2983.Palindrome-Rearrangement-Queries.cpp new file mode 100644 index 000000000..15bf8654b --- /dev/null +++ b/Greedy/2983.Palindrome-Rearrangement-Queries/2983.Palindrome-Rearrangement-Queries.cpp @@ -0,0 +1,90 @@ +using PII = pair; +class Solution { + int diff[100005]; + int presum1[100005][26]; + int presum2[100005][26]; +public: + vector canMakePalindromeQueries(string s, vector>& queries) + { + int n = s.size(); + string t = s.substr(n/2, n/2); + reverse(t.begin(), t.end()); + int m = n/2; + s = s.substr(0, n/2); + t = "#"+t; + s = "#"+s; + + for (int i=1; i<=m; i++) + diff[i] = diff[i-1] + (s[i]!=t[i]); + + for (int i=1; i<=m; i++) + for (int ch=0; ch<26; ch++) + { + presum1[i][ch] = presum1[i-1][ch] + (s[i]=='a'+ch); + presum2[i][ch] = presum2[i-1][ch] + (t[i]=='a'+ch); + } + + vectorrets; + for (auto& query: queries) + { + int a = query[0]+1, b = query[1]+1; + int c = m-1-(query[3]-n/2)+1; + int d = m-1-(query[2]-n/2)+1; + + rets.push_back(process(a,b,c,d,m)); + } + return rets; + } + + bool process(int a, int b, int c, int d, int m) + { + vectorcross; + if (max(a,c) <= min(b,d)) + cross.push_back({max(a,c), min(b,d)}); + vectorA; + vectorB; + if (cross.size() == 0) + { + A.push_back({a,b}); + B.push_back({c,d}); + } + else + { + if (a<=c-1) A.push_back({a,c-1}); + if (d+1<=b) A.push_back({d+1, b}); + if (b+1<=d) B.push_back({b+1, d}); + if (c<=a-1) B.push_back({c, a-1}); + } + + int count_diff = 0; + vectorUnion; + for (auto x: cross) Union.push_back(x); + for (auto x: A) Union.push_back(x); + for (auto x: B) Union.push_back(x); + for (auto [s,e]: Union) + count_diff += diff[e] - diff[s-1]; + if (diff[m] - count_diff != 0) return false; + + vectorcount1(26); + vectorcount2(26); + for (int ch=0; ch<26; ch++) + { + count1[ch] = presum1[b][ch] - presum1[a-1][ch]; + count2[ch] = presum2[d][ch] - presum2[c-1][ch]; + } + for (int ch=0; ch<26; ch++) + { + for (auto [s,e]: A) + count1[ch] -= presum2[e][ch] - presum2[s-1][ch]; + for (auto [s,e]: B) + count2[ch] -= presum1[e][ch] - presum1[s-1][ch]; + if (count1[ch]<0 || count2[ch]<0) + return false; + } + + for (int ch=0; ch<26; ch++) + if (count1[ch]!=count2[ch]) return false; + + return true; + } +}; diff --git a/Greedy/2983.Palindrome-Rearrangement-Queries/Readme.md b/Greedy/2983.Palindrome-Rearrangement-Queries/Readme.md new file mode 100644 index 000000000..ddaeb877e --- /dev/null +++ b/Greedy/2983.Palindrome-Rearrangement-Queries/Readme.md @@ -0,0 +1,24 @@ +### 2983.Palindrome-Rearrangement-Queries + +首先我们预处理一下,将s的后半段翻转之后记为t,令s和t的长度都是m=n/2。并且将两个字符串都看做1-index。 + +我们容易得到s中可以重排的区间记做[a,b],t中可以重排的区间记做[c,d]. 考虑到这两个区间可能有多种交汇的可能:不相交、相交但不包含,完全包含。我们做如下区间处理: +1. 计算相交的区间,记做cross:```{max(a,c), min(b,d)}```,注意该区间可能为空。 +2. 计算属于区间ab但不属于cd的区域,记做A + ```cpp + if (a<=c-1) A.push_back({a, c-1}); + if (d+1<=b) A.push_back({d+1, b}); + ``` +3. 计算属于区间cd但不属于ab的区域,记做B + ```cpp + if (c<=a-1) B.push_back({c,a-1}); + if (b+1<=d) B.push_back({b+1,d}); + ``` +4. 计算要么属于ab要么属于cd的区间,记做Union,其实就是以上cross, A, B里面区间的合并。 + +判断合法的条件有如下: +1. 在Union之外的区域,必须要求s和t每个字符都相等。这里有一个巧妙的判定方法。我们构造前缀数组diff[i]表示前i个位置里有多少个s与t字母不相同的位置。于是我们只需要查验Union里面的、不同字母的位置个数,是否等于diff[m]即可。 +2. 对于A区间,s里面的字符调整后必须和t里面的字符完全一致。所以我们先将s在区间ab的字符频次放入count1,再消耗掉t在区间A里的字符频次。要求不能出现负数。 +3. 对于B区间,t里面的字符调整后必须和s里面的字符完全一致。所以我们先将t在区间cd的字符频次放入count2,再消耗掉s在区间B里的字符频次。要求不能出现负数。 +4. 
最后,剩余的count1和count2代表了cross部分,两者的字母频次必须完全一致。 + diff --git a/Greedy/3012.Minimize-Length-of-Array-Using-Operations/3012.Minimize-Length-of-Array-Using-Operations.cpp b/Greedy/3012.Minimize-Length-of-Array-Using-Operations/3012.Minimize-Length-of-Array-Using-Operations.cpp new file mode 100644 index 000000000..a3b8e6b96 --- /dev/null +++ b/Greedy/3012.Minimize-Length-of-Array-Using-Operations/3012.Minimize-Length-of-Array-Using-Operations.cpp @@ -0,0 +1,15 @@ +class Solution { +public: + int minimumArrayLength(vector& nums) + { + sort(nums.begin(), nums.end()); + for (int i=1; i& nums, int k) + { + int n = nums.size(); + vectorarr(n); + int ret = 0; + for (int t=29; t>=0; t--) + { + for (int i=0; i>t)&1); + + if (checkOK(arr, k)) + ret = ret*2+0; + else + { + ret = ret*2+1; + for (int i=0; i> 1); + } + } + return ret; + } + + bool checkOK(vector&arr, int k) + { + int n = arr.size(); + int count = 0; + int i = 0; + while (i>& points) + { + sort(points.begin(), points.end(), [](vector&a, vector&b){ + if (a[0]==b[0]) return a[1]>b[1]; + else return a[0] upper) continue; + if (points[j][1] > lower && points[j][1] <= upper) + ret++; + lower = max(lower, points[j][1]); + } + } + + return ret; + } +}; diff --git a/Greedy/3027.Find-the-Number-of-Ways-to-Place-People-II/Readme.md b/Greedy/3027.Find-the-Number-of-Ways-to-Place-People-II/Readme.md new file mode 100644 index 000000000..feb2bb3cf --- /dev/null +++ b/Greedy/3027.Find-the-Number-of-Ways-to-Place-People-II/Readme.md @@ -0,0 +1,5 @@ +### 3027.Find-the-Number-of-Ways-to-Place-People-II + +此题允许n^2的时间复杂度,可以暴力枚举所有符合条件的{左上角、右下角}配对,判断是否是合法的解。 + +对于二维坐标的点,有两个方向上的自由度,同时考虑他们之间的包含关系肯定复杂,我们必然会尝试将他们先按照一个维度排序,比如说x轴。对于两个点A和B在x轴上是递增的,他们能够配对成功的条件就是:x轴上A与B之间的点,不能在y轴上也出现在A与B之间。换句话说,如果横坐标位于A与B之间的所有点的y坐标上限是P(排除那些高于A的点),那么B能与A配对的条件就是:B的y周坐标必须大于P。随着B在x轴上离A越远,那么这个上限值其实是单调递增的。由此从左到右扫一遍所有的点,不断更新上限P,就能判断出每个点是否可以与A配对。 diff --git a/Greedy/3139.Minimum-Cost-to-Equalize-Array/3139.Minimum-Cost-to-Equalize-Array.cpp b/Greedy/3139.Minimum-Cost-to-Equalize-Array/3139.Minimum-Cost-to-Equalize-Array.cpp new file mode 100644 index 000000000..3e8fea5c5 --- /dev/null +++ b/Greedy/3139.Minimum-Cost-to-Equalize-Array/3139.Minimum-Cost-to-Equalize-Array.cpp @@ -0,0 +1,41 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int minCostToEqualizeArray(vector& nums, int cost1, int cost2) + { + int n = nums.size(); + sort(nums.begin(), nums.end()); + + if (n<=2) + { + return (LL)(nums[n-1]-nums[0])*cost1%M; + } + + LL total = accumulate(nums.begin(), nums.end(), 0LL); + + cost2 = min(cost1*2, cost2); + int m = nums.back(); + LL ret = LLONG_MAX; + for (int limit = m; limit <= 2*m; limit++) + { + LL diff0 = limit - nums[0]; + LL diff_all = (LL)limit*n - total; + + LL ans; + if (diff0 <= diff_all/2) + { + ans = diff_all/2*cost2 + (diff_all%2==1?cost1:0); + } + else + { + ans = (diff_all - diff0)*cost2 + (diff0 - (diff_all - diff0))*cost1; + } + + ret = min(ret, ans); + } + + return ret%M; + + } +}; diff --git a/Greedy/3139.Minimum-Cost-to-Equalize-Array/Readme.md b/Greedy/3139.Minimum-Cost-to-Equalize-Array/Readme.md new file mode 100644 index 000000000..aadc62ecb --- /dev/null +++ b/Greedy/3139.Minimum-Cost-to-Equalize-Array/Readme.md @@ -0,0 +1,15 @@ +### 3139.Minimum-Cost-to-Equalize-Array + +首先,我们令cost2=min(cost1*2, cost2),保证使用第二种操作不会比第一种操作更亏。这样变换之后,我们知道应该尽量使用cost2更合算。 + +其次要注意,并不是操作的次数越少,付出的代价就越少。比如说nums=[1,4,4],我们可以使用三次cost1,使得所有元素最终变为4;也可以使用六次cost2,使得最终所有元素变成7. 
哪种方案更好,取决于cost1和cost2的大小关系。因此本题的最优策略,不一定是将所有的元素都变成max(nums),而是可能一个更大的数字,我们记做limit。假如limit是已知量,我们如何计算需要用的最小代价呢? + +既然我们需要将所有元素都增至limit,那么将数组排序后,每个元素离目标还差{diff0, diff1, diff2, ...., 0}。记所有的diff之和为`diff_all`。我们容易理解这样一个结论:如果在所有的diff里面存在一个超过`diff_all`一半的绝对多数,那它必然就是diff0. 我们为了尽可能多地利用第二种操作,必然是每增加nums0的同时搭配一个增加其他的元素。最终我们会进行cost2的操作共`diff_all-diff0`次。剩下来还没增至limit的只是nums0,还差`limit-(diff_all-diff0)`,这就需要用cost1来实现。于是总共的代价为`(diff_all-diff0)*cost2 + (limit-(diff_all-diff0))*cost1`. + +相反,如果{diff}里面不存在一个绝对多数,根据Boyer-Moore Majority Voting的原理,那么我们必然可以持续找到一对pair进行增一操作,且它们来自两个不同的元素。最终直至所有元素都增至limit,或者只差一次增一操作故无法找到pair了。这种情况下,我们的cost2操作了`diff_all/2`次,并且根据diff_all的奇偶性,可能再增加一次cost1的操作。 + +以上我们知道当limit已知时,如何计算最小代价。但是limit该如何确定呢?我们发现,随着limit的增长,{diff}数组整体变大,越来越不可能出现第一种情况。而一旦{diff}进入第二种情况时,就已经将cost2用到了极致(即只会用最多一次cost1),再增长limit就没有意义。那什么时候{diff}会进入第二种情况呢?显然,至少当limit变成`2*max(nums)`,{diff}里面肯定不会出现绝对多数了(当n>=3时):这是因为diff0小于2m,而其他每个diff都大于m。 + +所以我们只需要穷举limit的范围`[m, 2m]`,对于给定的limit我们计算最小代价,全局再取最小即可。考虑到m是1e6,这是可以暴力实现的。 + + diff --git a/Greedy/3219.Minimum-Cost-for-Cutting-Cake-II/3219.Minimum-Cost-for-Cutting-Cake-II.cpp b/Greedy/3219.Minimum-Cost-for-Cutting-Cake-II/3219.Minimum-Cost-for-Cutting-Cake-II.cpp new file mode 100644 index 000000000..98acd52a6 --- /dev/null +++ b/Greedy/3219.Minimum-Cost-for-Cutting-Cake-II/3219.Minimum-Cost-for-Cutting-Cake-II.cpp @@ -0,0 +1,37 @@ +using LL = long long; +class Solution { +public: + long long minimumCost(int m, int n, vector& h, vector& v) + { + sort(h.rbegin(), h.rend()); + sort(v.rbegin(), v.rend()); + + LL i=0, j=0; + LL ret = 0; + while (iv[j]) + { + ret += h[i]*(j+1); + i++; + } + else + { + ret += v[j]*(i+1); + j++; + } + } + while (i costV`的时候,方案1更优。 + +综上,我们可以得到一个推测的结论:我们只需要将所有横切与纵切的位置按照cost从大到小排序、依次切割并且“一切到底”,这样得到的总代价最小。 + +上述结论有一个重要的前提,就是每刀都是一切到底。那么是否存在不“一切到底”反而更优的情况呢?考虑下面 +``` + _________C____ +A |_____|B | + | | | + |_____|______| + D +``` +假设按照排序后的cost,第一刀是竖切,第二刀是横切AB。那么是否该将右半边也横切掉呢?我们的顾虑是,如果在右半边存在一个竖切的位置CD,那么AB一切到底会引入两倍的竖切CD。而如果先切CD,再切AB的延长线,那么我们引入的代价是CD+AB*2. 事实上,从之前的排序过程中我们已经知道AB的代价大于CD,所以后者的方案是不如前者的。因此,我们在按照cost顺次执行切割的时候都会“一切到底”。 diff --git a/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections/3394.Check-if-Grid-can-be-Cut-into-Sections.cpp b/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections/3394.Check-if-Grid-can-be-Cut-into-Sections.cpp new file mode 100644 index 000000000..87f6a0874 --- /dev/null +++ b/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections/3394.Check-if-Grid-can-be-Cut-into-Sections.cpp @@ -0,0 +1,37 @@ +class Solution { +public: + bool checkValidCuts(int n, vector>& arr) + { + vector>widths; + vector>heights; + for (int i=0; i>&arr) + { + sort(arr.begin(),arr.end()); + + int j=0; + int count = 0; + for (int i=0; i=3) return true; + } + return false; + } +}; diff --git a/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections/Readme.md b/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections/Readme.md new file mode 100644 index 000000000..85ca694d5 --- /dev/null +++ b/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections/Readme.md @@ -0,0 +1,7 @@ +### 3394.Check-if-Grid-can-be-Cut-into-Sections + +本题的本质就是在横纵方向上,分别查验是否存在至少三个non-overlapping intervals. 
+ +数non-overlapping intervals的经典算法就是将所有区间按照首端点排序。将第一个区间的未端点记作far,然后依次查看后续区间的首端点是否小于等于far,是的话就说明必然存在overlap。同时,每查看一个后续区间,我们都用该区间的尾端点区更新far值(取max)。直至下一个区间的首端点在far之后停止。此时我们之前考察的所有区间,必然都是存在partial overlap的,但是他们merge后的整体不会与其他区间再有重合。 + +之后我们再从下一个区间开始,重复上面的操作,找到另一个存在overlap的区间群。依次类推。 diff --git a/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags/3413.Maximum-Coins-From-K-Consecutive-Bags.cpp b/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags/3413.Maximum-Coins-From-K-Consecutive-Bags.cpp new file mode 100644 index 000000000..c94294323 --- /dev/null +++ b/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags/3413.Maximum-Coins-From-K-Consecutive-Bags.cpp @@ -0,0 +1,46 @@ +using LL = long long; +class Solution { +public: + long long maximumCoins(vector>& coins, int k) + { + LL ret = 0; + sort(coins.begin(), coins.end()); + ret = max(ret, helper(coins, k)); + + for (auto& coin: coins) + { + int a = coin[0], b = coin[1]; + coin[0] = -b; + coin[1] = -a; + } + sort(coins.begin(), coins.end()); + ret = max(ret, helper(coins, k)); + + return ret; + } + + LL helper(vector>& coins, int k) + { + int n = coins.size(); + int j = 0; + LL sum = 0; + LL ret = 0; + for (int i=0; i= coins[j][1]) + { + sum += (LL)(coins[j][1]-coins[j][0]+1)*coins[j][2]; + j++; + } + LL extra = 0; + if (j= coins[j][0]) + { + extra += (LL)(end - coins[j][0] + 1) * coins[j][2]; + } + ret = max(ret, sum + extra); + sum -= (LL)(coins[i][1]-coins[i][0]+1)*coins[i][2]; + } + return ret; + } +}; diff --git a/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags/Readme.md b/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags/Readme.md new file mode 100644 index 000000000..7c5469a3d --- /dev/null +++ b/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags/Readme.md @@ -0,0 +1,11 @@ +### 3413.Maximum-Coins-From-K-Consecutive-Bags + +此题和2271.Maximum-White-Tiles-Covered-by-a-Carpet的思路类似。 + +对于长度为k的跨度,如果其一个端点没有落在任何区间,那么显然是不划算的。我们必然有更优的策略:平移这段跨度直至一端接触到某个区间的边缘,这样可以在另一端覆盖到更多的有效区域得到更大的价值。注意“某个区间的端点”可以是左端点,也可以是右端点。 + +再考虑,对于长度为k的跨度,如果其两个端点分别都落在了区间A和区间B内,那么同样也是不划算的。只要区间A和B的价值密度不一样,那么我们必然能找到更优的解,即朝价值密度更高的那个方向平移即可。平移的最终结果是:完全离开价值密度低的区间(如果另一端依然在价值密度高的区间的话),或者触碰到价值密度高的区间的边缘。 + +所以上述的结论就是,最优解的情况,必然发生在所选跨度恰好触碰在某个区间边缘的时候。所以我们分两种情况。首先,从左往右遍历每个区间的左边缘,当做是所选跨度k的左边界,然后可以确定右边界的位置,这样就计算总价值;随着对左边界的挨个尝试,右边界也是单调移动的。所以这是一个典型的双指针。然后,反过来,从右往左遍历每个区间的右边缘,当做是所选跨度的右边界,然后可以确定左边界的位置,这样就计算总价值;随着对右边界的挨个尝试,左边界也是单调移动的。 + +对于第二次遍历,我们可以重复利用第一次遍历的函数。只要将每个区间的左右端点完全颠倒即可。即原区间范围是[a,b],那么我们构造一个新的区间范围[-b,-a]。这样我们依然可以重复利用从左往右遍历的代码,本质上实现了从右往左的遍历。 diff --git a/Greedy/3458.Select-K-Disjoint-Special-Substrings/3458.Select-K-Disjoint-Special-Substrings.cpp b/Greedy/3458.Select-K-Disjoint-Special-Substrings/3458.Select-K-Disjoint-Special-Substrings.cpp new file mode 100644 index 000000000..e452ea401 --- /dev/null +++ b/Greedy/3458.Select-K-Disjoint-Special-Substrings/3458.Select-K-Disjoint-Special-Substrings.cpp @@ -0,0 +1,55 @@ +class Solution { +public: + bool maxSubstringLength(string s, int k) + { + int n = s.size(); + vector>pos(26); + for (int i=0; i>intervals; + for (int letter=0; letter<26; letter++) + { + if (pos[letter].empty()) continue; + int start = pos[letter][0]; + int i = start; + int far = pos[letter].back(); + + bool flag = true; + while (i<=far) + { + far = max(far, pos[s[i]-'a'].back()); + if (pos[s[i]-'a'][0]=k; + } + + + int helper(vector> &intervals) { + sort(intervals.begin(), intervals.end(), [](pair a, pair b) { + return a.second < b.second; + }); + + int count = 0; + int far = INT_MIN; + + for (auto &interval : intervals) + { + if (interval.first > far) { + count++; + far = 
interval.second; + } + } + return count; + } +}; diff --git a/Greedy/3458.Select-K-Disjoint-Special-Substrings/3458.Select-K-Disjoint-Special-Substrings_v2.cpp b/Greedy/3458.Select-K-Disjoint-Special-Substrings/3458.Select-K-Disjoint-Special-Substrings_v2.cpp new file mode 100644 index 000000000..1cecdda41 --- /dev/null +++ b/Greedy/3458.Select-K-Disjoint-Special-Substrings/3458.Select-K-Disjoint-Special-Substrings_v2.cpp @@ -0,0 +1,60 @@ +class Solution { +public: + bool maxSubstringLength(string s, int k) + { + int n = s.size(); + vector>pos(26); + for (int i=0; i>intervals; + for (int letter=0; letter<26; letter++) + { + if (pos[letter].empty()) continue; + int start = pos[letter][0]; + int i = start; + int far = pos[letter].back(); + + bool flag = true; + while (i<=far) + { + far = max(far, pos[s[i]-'a'].back()); + if (pos[s[i]-'a'][0]=k; + } + + bool contains(paira, pairb) + { + return a.firstb.second; + } + + int helper(vector> &intervals) { + int n = intervals.size(); + vectorcheck(n, 1); + for (int i=0; i& A, vector& B) + { + int n = A.size(); + unordered_mapexpected; + for (int i=0; i& nums) + { + vector>arr; + int n = nums.size(); + for (int i=0; i0) + { + sum += x%10; + x/=10; + } + arr.push_back({sum, nums[i], i}); + } + + sort(arr.begin(), arr.end()); + + vectorA(n); + vectorB(n); + for (int i=0; i>& A, vector>& B) { + int n = A.size(); + int ret = INT_MAX; + int minEnd = INT_MAX; + for (int i=0; i=minEnd) + ret = min(ret, B[i].first+B[i].second); + else + ret = min(ret, minEnd+B[i].second); + } + return ret; + } + + int earliestFinishTime(vector& landStartTime, vector& landDuration, vector& waterStartTime, vector& waterDuration) { + vector>A; + vector>B; + for (int i=0; i lexicalOrder(int n) { - int current=1; - vectorresults(n); + vectorrets = {1}; + int i=1; - for (int i=0; in) - current=current/10; - current++; - while (current % 10==0) - current/=10; + i=i*10; } - } - return results; + else + { + while (i+1>n || (i%10==9)) + i = i/10; + i+=1; + } + + rets.push_back(i); + } + + return rets; } }; diff --git a/Greedy/386.Lexicographical-Numbers/Readme.md b/Greedy/386.Lexicographical-Numbers/Readme.md index 39240a944..14fbd992e 100644 --- a/Greedy/386.Lexicographical-Numbers/Readme.md +++ b/Greedy/386.Lexicographical-Numbers/Readme.md @@ -1,12 +1,8 @@ ### 386.Lexicographical-Numbers -研究序列[1,10,11,12,13,2,3,4,5,6,7,8,9],找出字典序的规律。 +对于字典序列的next,核心就是 +1. 尝试往后加0, 否则 +2. 
找最低的、加1不需要进位的位置,在该位置加1后,舍弃之后的位置即可。 -规律1:不考虑上限,元素1后面跟什么元素?10, 100 … 也就是不断乘以10。 -规律2:如果99是上限,那么10后面的元素不能是100了,该怎么办?答案是11,也就是加1,这样个位上的数变大了。如果加1导致进位的话,虽然个位数变0,但十位上的数会变大,总之肯定字典序往后移。但此时得到的并不是下一个的目标,因为把其末尾的0去掉会得到字典序相对更前的数。砍掉0之后就可以重复规律1的操作了。 - -规律3:如果上限是19,那么19后面的元素就不能是20了,该怎么办?答案是将19除以10,然后再重复规律2(也就是加1),也就是得到2,之后又可以重复规律1了。 - - -[Leetcode Link](https://leetcode.com/problems/lexicographical-numbers) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/lexicographical-numbers) diff --git a/Greedy/452.Minimum-Number-of-Arrows-to-Burst-Balloons/452.Minimum-Number-of-Arrows-to-Burst-Balloons.cpp b/Greedy/452.Minimum-Number-of-Arrows-to-Burst-Balloons/452.Minimum-Number-of-Arrows-to-Burst-Balloons.cpp index 8bfb712a8..fb227b791 100644 --- a/Greedy/452.Minimum-Number-of-Arrows-to-Burst-Balloons/452.Minimum-Number-of-Arrows-to-Burst-Balloons.cpp +++ b/Greedy/452.Minimum-Number-of-Arrows-to-Burst-Balloons/452.Minimum-Number-of-Arrows-to-Burst-Balloons.cpp @@ -1,23 +1,23 @@ class Solution { - static bool cmp(paira, pairb) + static bool cmp(vector&a, vector&b) { - return a.second>& points) + int findMinArrowShots(vector>& points) { - sort(points.begin(),points.end(),cmp); - - int j=0; - int count=0; - while (j findPermutation(string s) + vector findPermutation(string pattern) { - s.insert(s.begin(),s[0]); - int N=s.size(); - vectorresults(N,0); + pattern = "I" + pattern; + int n = pattern.size(); - int left=0; - int right=0; + int mx = 0; + vectorarr; - while (right=mx+1; k--) + arr.push_back(k); + mx = mx+count; + i = j-1; } - return results; - + return arr; } }; diff --git a/Greedy/484.Find-Permutation/Readme.md b/Greedy/484.Find-Permutation/Readme.md index 1b973b5f4..0112a5f21 100644 --- a/Greedy/484.Find-Permutation/Readme.md +++ b/Greedy/484.Find-Permutation/Readme.md @@ -1,10 +1,7 @@ ### 484.Find-Permutation -需要人工分析出最优的策略。 +本题本质和`2375.Construct-Smallest-Number-From-DI-String`一模一样。 -以“下拐点”为分界点将s序列分为若干个II...IIDD...DD的组合。对于每个II...IIDD...DD,可以知道最优方法是:将除最后一个I之外的所有I对应一个递增数列,剩下的一个I和所有的D对应一个递减数列,且递减数列的最小值是那个递增数列最大值加1。更有用的是,可以知道,所有的递增数列的值都是和它的index值是对应的`results[i]=i+1`。 +基本思想是将s的开头加上一个“I”,将s切分为若干"IDD..D"的pattern,每个pattern对应的是一段单调递减的序列。保证序列与序列之间不重合即可。 -那么s的位数和results的位数不一样怎么办?一个简单的方法是`s.insert(s.begin(),s[0])`,这样s和results的元素数目就是一致的,且各个位置都适用同样的代码语句。 - - -[Leetcode Link](https://leetcode.com/problems/find-permutation) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/find-permutation) diff --git a/Greedy/556.Next-Greater-Element-III/556.Next-Greater-Element-III.cpp b/Greedy/556.Next-Greater-Element-III/556.Next-Greater-Element-III.cpp new file mode 100644 index 000000000..7ae9d9f89 --- /dev/null +++ b/Greedy/556.Next-Greater-Element-III/556.Next-Greater-Element-III.cpp @@ -0,0 +1,34 @@ +class Solution { +public: + int nextGreaterElement(int n) + { + vectordigits; + while(n>0) + { + digits.push_back(n%10); + n=n/10; + } + int m = digits.size(); + + reverse(digits.begin(), digits.end()); + + int i = m-1; + while (i>=1 && digits[i-1] >= digits[i]) + i--; + if (i==0) return -1; + + i--; + int j = m-1; + while (digits[j] <= digits[i]) + j--; + swap(digits[i], digits[j]); + sort(digits.begin()+i+1, digits.end()); + + long long ret=0; + for (int i=0; iINT_MAX) return -1; + else return ret; + } +}; diff --git a/String/556.Next-Greater-Element-III/Readme.md b/Greedy/556.Next-Greater-Element-III/Readme.md similarity index 92% rename from String/556.Next-Greater-Element-III/Readme.md rename to Greedy/556.Next-Greater-Element-III/Readme.md index 3702936d9..aa3aff20a 100644 --- 
a/String/556.Next-Greater-Element-III/Readme.md +++ b/Greedy/556.Next-Greater-Element-III/Readme.md @@ -1,8 +1,10 @@ ### 556.Next-Greater-Element-III +此题和`031.next permuation`一模一样 + 首先,从低位到高位找到第一个不满足升序的数字。显然,如果从低位到高位都是升序的话,那么找不到任何可以比这个数字更大的变换了。 假设找到这样的数字在第n+1位(记做k),那么在1\~n这个n个低位数字中找到恰比k大的数字(记做m),交换k和m。于是变换后的第n+1位就这么定下来了(可以分析出这就是最小的改动)。剩下来的第1~n位(记得其中有一个是之前调换过来的k),我们让它们按照降序排列即可。 -[Leetcode Link](https://leetcode.com/problems/next-greater-element-iii) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/next-greater-element-iii) diff --git a/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays.cpp b/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays.cpp new file mode 100644 index 000000000..6329128b2 --- /dev/null +++ b/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays.cpp @@ -0,0 +1,58 @@ +class Solution { +public: + vector maxSumOfThreeSubarrays(vector& nums, int k) + { + int n = nums.size(); + vectorpresum(n); + for (int i=0; ileftMax(n,0); + vectorleftIdx(n,0); + int maxSum = 0; + int maxIdx = 0; + for (int i=k-1; i maxSum) + { + maxSum = sum; + maxIdx = i-k+1; + } + leftMax[i] = maxSum; + leftIdx[i] = maxIdx; + } + + vectorrightMax(n,0); + vectorrightIdx(n,0); + maxSum = 0; + maxIdx = 0; + for (int i=n-k; i>=0; i--) + { + // [i : i+k-1] + int sum = presum[i+k-1] - (i==0?0:presum[i-1]); + if (sum >= maxSum) + { + maxSum = sum; + maxIdx = i; + } + rightMax[i] = maxSum; + rightIdx[i] = maxIdx; + } + + vectorrets; + maxSum = 0; + for (int i=k; i+2*k<=n; i++) + { + int sum = presum[i+k-1] - (i==0?0:presum[i-1]); + if (sum + leftMax[i-1] + rightMax[i+k] > maxSum) + { + maxSum = sum + leftMax[i-1] + rightMax[i+k]; + rets = {leftIdx[i-1], i, rightIdx[i+k]}; + } + } + + return rets; + } +}; diff --git a/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays/Readme.md b/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays/Readme.md new file mode 100644 index 000000000..d9d0e136c --- /dev/null +++ b/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays/Readme.md @@ -0,0 +1,7 @@ +### 689.Maximum-Sum-of-3-Non-Overlapping-Subarrays + +这道题可以用动态规划很方便地求解最大值,但是如果要记录最大值所对应的区间的位置,则会略显麻烦。 + +考虑到本题恰好只求三段区间,所以很容易想到three-pass的套路。我们在数组里遍历一段长度为k的滑窗作为中间的区间,假设范围是`[i:i+k-1]`,那么我们只需要求在`[0:i-1]`内最大的长度为k的区间,以及在`[i+k:n-1]`内最大的长度为k的区间。这两个分量都是可以提前计算好的。我们只要在数组上从前往后跑一遍长度为k的滑窗,就可以记录任意前缀里曾经出现过的最大值,记做leftMax[i];同理,在数组上从后往前跑一遍长度为k的滑窗,就可以记录任意后缀里曾经出现过的最大值,记做rightMax[i]。所以我们只要找到全局最大的`leftMax[i-1] + sum[i:i+k-1] + rightMax[i+k]`即可。 + +除此之外,我们还需要记录下leftMax[i]所对应的最大滑窗的位置,即为leftIdx[i]。这里要注意一个细节,因为题意要求,如果有多个总和相同的解,取index位置最小的解。所以我们从左往右遍历的时候,只有在leftMax大于历史最大值的时候才更新leftIdx,这样在相同的leftMax的时候我们保留的是较小的index。同理,我们在从右往左遍历的时候,当rightMax大于等于历史最大值,就可以更新rightIdx,这样在相同的rightMax的时候我们保留的是较小的index。 diff --git a/Greedy/781.Rabbits-in-Forest/781.Rabbits-in-Forest.cpp. b/Greedy/781.Rabbits-in-Forest/781.Rabbits-in-Forest.cpp similarity index 100% rename from Greedy/781.Rabbits-in-Forest/781.Rabbits-in-Forest.cpp. 
rename to Greedy/781.Rabbits-in-Forest/781.Rabbits-in-Forest.cpp diff --git a/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String_v1.cpp b/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String_v1.cpp new file mode 100644 index 000000000..b406067a3 --- /dev/null +++ b/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String_v1.cpp @@ -0,0 +1,27 @@ +class Solution { +public: + int uniqueLetterString(string s) + { + int n = s.size(); + vector>pos(26); // pos[k]: the pos of letter k so far (by i-th) + + int ret = 0; + for (int i=0; i=2) + { + int m = pos[k].size(); + ret += pos[k][m-1] - pos[k][m-2]; + } + else if (pos[k].size()==1) + { + ret += pos[k][0]+1; + } + } + } + return ret; + } +}; diff --git a/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String_v2.cpp b/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String_v2.cpp new file mode 100644 index 000000000..b4aa5feb4 --- /dev/null +++ b/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String_v2.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + int uniqueLetterString(string s) + { + int n = s.size(); + vector>pos(26); + + for (int k=0; k<26; k++) + pos[k].push_back(-1); + for (int i=0; i diStringMatch(string pattern) + { + pattern = "I" + pattern; + int n = pattern.size(); + + int mx = -1; + vectorarr; + + for (int i=0; i=mx+1; k--) + arr.push_back(k); + + mx = mx+count; + + i = j-1; + } + + return arr; + } +}; diff --git a/Greedy/942.DI-String-Match/Readme.md b/Greedy/942.DI-String-Match/Readme.md new file mode 100644 index 000000000..b978dc449 --- /dev/null +++ b/Greedy/942.DI-String-Match/Readme.md @@ -0,0 +1,5 @@ +### 942.DI-String-Match + +此题完全可以用`2375.Construct-Smallest-Number-From-DI-String`一样的方法,给出字典序最小的permutation。 + +基本思想是将s的开头加上一个“I”,将s切分为若干"IDD..D"的pattern,每个pattern对应的是一段单调递减的序列。保证序列与序列之间不重合即可。 diff --git a/Hash/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum.cpp b/Hash/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum.cpp index 192803a27..0c92d80cb 100644 --- a/Hash/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum.cpp +++ b/Hash/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum.cpp @@ -1,30 +1,23 @@ class Solution { public: int widestPairOfIndices(vector& nums1, vector& nums2) - { - int sum = 0; - int n = nums1.size(); - vectorpre1(n+1); - vectorpre2(n+1); - - for (int i=1; i<=n; i++) - pre1[i] = pre1[i-1]+nums1[i-1]; - for (int i=1; i<=n; i++) - pre2[i] = pre2[i-1]+nums2[i-1]; - - vectordiff(n+1); - for (int i=0; i<=n; i++) - diff[i] = pre1[i]-pre2[i]; - + { + vectorarr; + for (int i=0; iMap; - Map[0]=0; + Map[0] = -1; + + int presum = 0; int ret = 0; - for (int i=1; i<=n; i++) + for (int i=0; i num(n - 1, true); - - for (int x = 2; x <= sqrt(n); x++) - { - if (num[x]==false) continue; - - for (int j=2; x*j < n; j++) - num[x*j] = false; - } - - int count=0; - for (int j = 2; j < n; ++j) - { - if (num[j]) count++; - } - - return count; - } -}; diff --git 
a/Hash/204.Count-Primes/Readme.md b/Hash/204.Count-Primes/Readme.md deleted file mode 100644 index 715e63119..000000000 --- a/Hash/204.Count-Primes/Readme.md +++ /dev/null @@ -1,18 +0,0 @@ -### 204.Count-Primes - -用倍数筛除法去除所有已知质数的倍数。最高效的容器是bool型的vector -```cpp -vectorq(n,true); -for (x=2; x<=sqrt(n); x++) -{ - if (q[x]==false) continue; - for (int i=2; x*i& nums) + { + unordered_mapMap; + for (int i=0; i& nums, int k) + { + for (auto& x: nums) + { + if (x>k) x=1; + else if (x==k) x=0; + else x=-1; + } + + unordered_mapoddSum; + unordered_mapevenSum; + + evenSum[0] = 1; + + int sum = 0; + int ret = 0; + for (int i=0; i, int>Map; +public: + long long fixedRatio(string s, int num1, int num2) + { + Map[{0,0}] = 1; + LL a = 0, b = 0; + LL ret = 0; + for (auto ch: s) + { + if (ch=='0') a++; + else b++; + + LL k = min(a/num1, b/num2); + LL x = a-k*num1, y = b-k*num2; + + if (Map.find({x,y})!=Map.end()) + ret += Map[{x,y}]; + + Map[{x,y}] += 1; + } + + return ret; + } +}; diff --git a/Hash/2489.Number-of-Substrings-With-Fixed-Ratio/Readme.md b/Hash/2489.Number-of-Substrings-With-Fixed-Ratio/Readme.md new file mode 100644 index 000000000..e0d2a9144 --- /dev/null +++ b/Hash/2489.Number-of-Substrings-With-Fixed-Ratio/Readme.md @@ -0,0 +1,9 @@ +### 2489.Number-of-Substrings-With-Fixed-Ratio + +对于区间,我们固定会联想到前缀和的应用。在本题里,如果一个区间[a,b]里的0/1比是固定的num1:num2,那么对于前缀prefix[b]和prefix[a-1]应该有什么共同的性质呢? + +此前我们遇到过类似的题目:如果区间[a,b]的元素和能被k整除,那么必然prefix[b]和prefix[a-1]关于k的余数一定相同。 + +类似的,本题里,我们希望prefix[b]和prefix[a-1]应该是关于“num1个0 + num2个1"这个“循环节”的余数相同。怎么定义这个余数呢?假设前缀长度L里面有a个0与b个1,我们可以知道里面出现了k次“num1个0 + num2个1",其中`k=min(a/num1,b/num2)`。去除这些完整的循环节,剩下的零头的0与,就代表了“余数”。 + +所以我们定义hash表,key是“整除循环节后两种字符的零头”,value就是这个前缀出现了多少次。我们依次处理前缀,当该前缀的零头是{a,b},我们就看hash表里这样的零头pair已经出现过了多少次(前缀),这就意味着可以组成多少对区间,使得该区间里两种字符的个数恰好是“num1个0 + num2个1"的整数倍。 diff --git a/Hash/2588.Count-the-Number-of-Beautiful-Subarrays/2588.Count-the-Number-of-Beautiful-Subarrays.cpp b/Hash/2588.Count-the-Number-of-Beautiful-Subarrays/2588.Count-the-Number-of-Beautiful-Subarrays.cpp new file mode 100644 index 000000000..2794ed76c --- /dev/null +++ b/Hash/2588.Count-the-Number-of-Beautiful-Subarrays/2588.Count-the-Number-of-Beautiful-Subarrays.cpp @@ -0,0 +1,25 @@ +class Solution { +public: + long long beautifulSubarrays(vector& nums) + { + unordered_mapMap; + Map[0] = 1; + int state = 0; + long long ret = 0; + for (int i=0; i>k)&1) + ((state>>k)&1); + t = t%2; + state = state - (((state>>k)&1)<& nums) + { + unordered_mapMap; + // Map[state] : how many times of state there have been + Map[0] = 1; + + int state = 0; + LL ret = 0; + for (int i=0; i& nums, int modulo, int k) + { + int n = nums.size(); + int count = 0; + unordered_mapMap; + Map[0]+=1; + LL ret = 0; + + for (int i=0; i& nums, int target) + { + LL total = accumulate(nums.begin(), nums.end(), 0LL); + + int n = nums.size(); + for (int i=0; ipresum(2*n, 0); + presum[0] = nums[0]; + for (int i=1; i<2*n; i++) + presum[i] = presum[i-1] + nums[i]; + + LL ret = INT_MAX/2; + + unordered_mapMap; // presum module, index + Map[0] = -1; + for (int i=0; i<2*n; i++) + { + LL r = ((presum[i] - target) % total + total) % total; + + if (Map.find(r)!=Map.end()) + { + int j = Map[r]; + LL k = ((j==-1? 
0: presum[j]) - presum[i] + target) / total; + ret = min(ret, i-j+k*n); + } + + Map[presum[i]%total] = i; + } + + if (ret == INT_MAX/2) return -1; + else return ret; + } +}; diff --git a/Hash/2875.Minimum-Size-Subarray-in-Infinite-Array/Readme.md b/Hash/2875.Minimum-Size-Subarray-in-Infinite-Array/Readme.md new file mode 100644 index 000000000..5ad313659 --- /dev/null +++ b/Hash/2875.Minimum-Size-Subarray-in-Infinite-Array/Readme.md @@ -0,0 +1,12 @@ +### 2875.Minimum-Size-Subarray-in-Infinite-Array + +很显然,任何一个subarray,都可以表示成“nums的某个后缀 + 若干个重复的nums + nums的某个前缀”。 + +我们将nums重复一遍(长度变成2n)之后,上述的subarray sum就是:`nums[i:j] + k * total`. 其中total是nums[0:n-1]之和。[i:j]是在[0:2n-1]上的一个子区间。k是某个整数(可以是0). + +当我们遍历j的位置,考察某个j时,期望 `presum[j]-presum[i-1]+k*total = target`,转换一下就是`presum[i-1] = presum[j] - target + k*total`。显然,为了使这个式子有解,充要条件就是`presum[i-1]`和`presum[j] - target`关于total同余,并且需要iSet = {'a','e','i','o','u' }; + +public: + vectorEratosthenes(int n) + { + vectorq(n+1,0); + vectorprimes; + for (int i=2; i<=sqrt(n); i++) + { + if (q[i]==1) continue; + int j=i*2; + while (j<=n) + { + q[j]=1; + j+=i; + } + } + for (int i=2; i<=n; i++) + { + if (q[i]==0) + primes.push_back(i); + } + return primes; + } + + long long beautifulSubstrings(string s, int k) + { + vectorprimes = Eratosthenes(k); + int m = 1; + for (int p:primes) + { + int count = 0; + while (k%p==0) + { + count++; + k/=p; + } + if (count!=0 && count%2==1) + m *= pow(p, (count+1)/2); + else if (count!=0 && count%2==0) + m *= pow(p, count/2); + } + m*=2; + + int n = s.size(); + s.insert(s.begin(), '#'); + int ret = 0; + + map>Map; + Map[0][0]=1; + + int count = 0; + + for (int i=1; i<=n; i++) + { + if (Set.find(s[i])!=Set.end()) + count++; + else + count--; + + if (Map.find(count)!=Map.end() && Map[count].find(i%m)!=Map[count].end()) + ret += Map[count][i%m]; + + Map[count][i%m]+=1; + } + + return ret; + } +}; diff --git a/Hash/2949.Count-Beautiful-Substrings-II/Readme.md b/Hash/2949.Count-Beautiful-Substrings-II/Readme.md new file mode 100644 index 000000000..71075d0dd --- /dev/null +++ b/Hash/2949.Count-Beautiful-Substrings-II/Readme.md @@ -0,0 +1,11 @@ +### 2949.Count-Beautiful-Substrings-II + +对于求substring的问题,我们很容想到用前缀和之差来解决。显然,对于以i结尾的substring,我们想要找满足条件的起始位置j,需要满足两个条件: +1. [j+1:i]内的元音辅音个数相等 <-> [0:j]的元音辅音个数之差必须等于[0:i]的元音辅音个数之差。 +2. 
[j+1:i]内的元音辅音个数乘积能被k整除 <-> `[(i-j)/2]^2 % k ==0` <-> `[(i-j)]^2 % 4k ==0` + +对于第一个条件,我们只要根据[0:i]的元音辅音个数之差(假设为d),在hash表中查找之前有多少个前缀串的元音辅音个数之差也是d。 + +对于第二个条件,理论上只要i与j关于sqrt(4k)同余,那么(i-j)就能被sqrt(4k)整除,也就是说(i-j)^2能被4k整除。但是sqrt(4k)可能不是一个整数。所以我们需要将k分解质因数,对于出现奇数次的质因子,显然我们需要再补一个该质因子以便k能被开方。我们将这样“松弛”后的k的开方结果记做m,那么我们只要i与j关于2m同余。就保证[(i-j)]^2能被4k整除。 + +于是本题的思路就是建立包含两个key的hash,来记录每个前缀的两个信息:元音辅音的个数之差,前缀长度关于2m的余数。对于任意的位置i,如果在hash表里能找到两个key都相同的位置j,那么[j+1:i]就是符合要求的substring。 diff --git a/Hash/2950.Number-of-Divisible-Substrings/2950.Number-of-Divisible-Substrings.cpp b/Hash/2950.Number-of-Divisible-Substrings/2950.Number-of-Divisible-Substrings.cpp new file mode 100644 index 000000000..039eda813 --- /dev/null +++ b/Hash/2950.Number-of-Divisible-Substrings/2950.Number-of-Divisible-Substrings.cpp @@ -0,0 +1,29 @@ +class Solution { +public: + int countDivisibleSubstrings(string word) + { + int n = word.size(); + word = "#"+word; + vectorpresum(n+1); + for (int i=1; i<=n; i++) + presum[i] = presum[i-1] + ((word[i]-'a'+1)/3+1); + + map>Map; + for (int m=1; m<=9; m++) + Map[m][0] = 1; + + int ret = 0; + for (int j=1; j<=n; j++) + { + for (int m = 1; m <=9; m++) + { + int key = presum[j] - m*j; + if (Map.find(m)!=Map.end() && Map[m].find(key)!=Map[m].end()) + ret += Map[m][key]; + Map[m][key]+=1; + } + } + + return ret; + } +}; diff --git a/Hash/2950.Number-of-Divisible-Substrings/Readme.md b/Hash/2950.Number-of-Divisible-Substrings/Readme.md new file mode 100644 index 000000000..af0d0833b --- /dev/null +++ b/Hash/2950.Number-of-Divisible-Substrings/Readme.md @@ -0,0 +1,9 @@ +### 2950.Number-of-Divisible-Substrings + +此题有精彩的o(N)解法。 + +对于以j为结尾的substring,我们希望找到位置i& nums, int k) { - unordered_map>Map; - int sum=0; + unordered_map>Map; + long sum=0; int result=INT_MIN; nums.insert(nums.begin(),0); diff --git a/Hash/3448.Count-Substrings-Divisible-By-Last-Digit/3448.Count-Substrings-Divisible-By-Last-Digit.cpp b/Hash/3448.Count-Substrings-Divisible-By-Last-Digit/3448.Count-Substrings-Divisible-By-Last-Digit.cpp new file mode 100644 index 000000000..fd25aa2ae --- /dev/null +++ b/Hash/3448.Count-Substrings-Divisible-By-Last-Digit/3448.Count-Substrings-Divisible-By-Last-Digit.cpp @@ -0,0 +1,43 @@ +using LL = long long; +class Solution { + int n; +public: + long long countSubstrings(string s) + { + n = s.size(); + vectornums; + for (auto ch: s) + nums.push_back(ch-'0'); + nums.insert(nums.begin(), 0); + + LL ret = 0; + for (int k=1; k<=9; k++) + ret += helper(nums, k); + return ret; + } + + LL helper(vector&nums, int k) + { + vectorcount(k, 0); + vectorcount2(k,0); + LL ret = 0; + + int r = 0; + count[0] = 1; + for (int i=1; i<=n; i++) + { + for (int d=0; d& nums) - { - int sum=0; - unordered_mapMap; - Map[0]=-1; - - int result=0; - for (int i=0; i& nums) + { + unordered_mapMap; // presum -> j + Map[0] = -1; + + int ret = 0; + int presum = 0; + for (int i=0; i& A, int S) + { + int n = A.size(); + vectorpostZeros(n); + int count = 0; + for (int i=n-1; i>=0; i--) + { + postZeros[i] = count; + if (A[i]==0) + count++; + else + count = 0; + } + + int j = 0, sum = 0; + int ret = 0; + for (int i=0; i& A, int K) - { + int subarraysDivByK(vector& nums, int k) { unordered_mapMap; - Map[0] = 1; - int presum = 0; - int ret = 0; - for (int i=0; i 0 ? 
presum%K : (presum%K+K)%K; - ret += Map[r]; + + int r = 0; + int count = 0; + for (int i=0; inext作为新的head,原本的end之后指向原本的head,再把p的next指向NULL, [Leetcode Link](https://leetcode.com/problems/rotate-list) diff --git a/Math/1017.Convert-to-Base--2/Readme.md b/Math/1017.Convert-to-Base--2/Readme.md index 3ebdc9708..c9bd4ac28 100644 --- a/Math/1017.Convert-to-Base--2/Readme.md +++ b/Math/1017.Convert-to-Base--2/Readme.md @@ -2,11 +2,11 @@ 本质上和求N的任何K进制的转化一样的做法。求得余数```r=N%K```作为当前最低位的数字,然后将```N=(N-r)/K```作为下一个循环的初始值直至为零。把所有的数字拼接起来倒序输出就是K进制的结果。 -特别注意,余数r必须是正数,也就是说无法除尽的时候,采用的是向下取整。比如说5/(-3),依据严格的数学定义,商是-2,余数是1. +特别注意,余数r必须是正数,也就是说无法除尽的时候,采用的是向下取整。比如说(-5)/(-3),依据严格的数学定义,商是2,余数是1. -但是,当除数是负数的时候,不同语言的运算规则会不一样。在C++/Java里面,整数的除法都是向零取整。比如说5/(-3),结果商是-1,余数是-2.这个余数因为是负数,是无法用来作为进制转换结果的。解决方案是:将商加上一,余数加上abs(K)。这样就转变成了向下取整的结果,余数也变成了正数。在这个例子中,结果商就是-1,余数是1. +但是,当除数是负数的时候,不同语言的运算规则会不一样。在C++/Java里面,整数的除法都是向零取整。比如说(-5)/(-3),结果商是1,余数是-2.这个余数因为是负数,是无法用来作为进制转换结果的。解决方案是:将商加上一,余数加上abs(K)。这样就转变成了向下取整的结果,余数也变成了正数。在这个例子中,结果商就是2,余数是1. 事实上,在wiki里面已经明确写明了negative base calculation的方法:https://en.wikipedia.org/wiki/Negative_base#Calculation -[Leetcode Link](https://leetcode.com/problems/convert-to-base--2) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/convert-to-base--2) diff --git a/Math/1286.Iterator-for-Combination/1286.Iterator-for-Combination.cpp b/Math/1286.Iterator-for-Combination/1286.Iterator-for-Combination.cpp index 9726993eb..f050f3ed1 100644 --- a/Math/1286.Iterator-for-Combination/1286.Iterator-for-Combination.cpp +++ b/Math/1286.Iterator-for-Combination/1286.Iterator-for-Combination.cpp @@ -1,7 +1,7 @@ class CombinationIterator { string cur; string end; - bool flag; + bool firstCall; string characters; int combinationLength; @@ -10,16 +10,16 @@ class CombinationIterator { { cur = characters.substr(0,combinationLength); end = characters.substr(characters.size()-combinationLength); - flag = 1; + firstCall = 1; this->characters = characters; this->combinationLength = combinationLength; } string next() { - if (flag) + if (firstCall) { - flag = 0; + firstCall = 0; return cur; } @@ -37,6 +37,6 @@ class CombinationIterator { bool hasNext() { - return cur!=end; + return firstCall==1 || cur!=end; } }; diff --git a/Math/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls.cpp b/Math/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls.cpp index b177fd300..aed69116e 100644 --- a/Math/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls.cpp +++ b/Math/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls/1467.Probability-of-a-Two-Boxes-Having-The-Same-Number-of-Distinct-Balls.cpp @@ -23,19 +23,16 @@ class Solution { return good*1.0/all; } - void dfs(int level, vector&set1, vector&set2) + void dfs(int k, vector&set1, vector&set2) { - if (level == balls.size()) - { - int tot1 = accumulate(set1.begin(), set1.end(), 0); - int tot2 = accumulate(set2.begin(), set2.end(), 0); - if (tot1!=tot2) return; + if (k == balls.size()) + { + if (accumulate(set1.begin(), set1.end(), 0) != accumulate(set2.begin(), set2.end(), 0)) + return; long long p = 1; for (int i=0; iisPrime(n, true); + int count = 0; + for (int i=2; i kthPalindrome(vector& queries, int intLength) + { + vectorrets; + for (int k: queries) + { + if (intLength 
== 1) + { + if (k>9) rets.push_back(-1); + else rets.push_back(k); + } + else if (intLength%2==0) + { + int m = intLength/2; + LL a = getKth(m, k); + if (a==-1) + { + rets.push_back(-1); + continue; + } + LL b = flip(a); + rets.push_back(a*pow(10, m) + b); + } + else + { + int m = intLength/2; + LL a = getKth(m+1, k); + if (a==-1) + { + rets.push_back(-1); + continue; + } + LL c = a % 10; + a = a/10; + LL b = flip(a); + rets.push_back(a*pow(10, m+1) + c*pow(10,m) + b); + } + } + return rets; + } + + LL getKth(int m, int k) + { + if (k > 9*pow(10, m-1)) return -1; + else return pow(10, m-1) + k-1; + } + + LL flip(LL a) + { + LL ret = 0; + while (a>0) + { + ret = ret*10+(a%10); + a/=10; + } + return ret; + } + +}; diff --git a/Math/2217.Find-Palindrome-With-Fixed-Length/Readme.md b/Math/2217.Find-Palindrome-With-Fixed-Length/Readme.md new file mode 100644 index 000000000..925cd860a --- /dev/null +++ b/Math/2217.Find-Palindrome-With-Fixed-Length/Readme.md @@ -0,0 +1,7 @@ +### 2217.Find-Palindrome-With-Fixed-Length + +因为回文数是镜像的。所以对于固定长度为L、第k大的回文数,其实只要求一半长度的第k个自然数即可。更具体地,我们要根据L的奇偶性分情况讨论。 + +如果L是偶数,那么本题就转换成了求长度为L/2的第k大的自然数。因为长度为L/2的最小自然数是1000...,即1e(L/2-1),所以回文数的左半边是```a = 1e(L/2-1) + (k-1)```。再将a镜像得到b,然后 ```a*1e(L/2)+b```就是最终的答案。 + +如果L是奇数,那么待求的回文数就是acb的形式。其中b是a的镜像,长度都是L/2;c是一个single digit。于是第k大的回文数,就等价于长度是L/2+1的第k大的自然数,即```a = 1e(L/2) + (k-1)```. 然后我们得到```c = a % 10```。将a去掉最后一位后,得到其镜像数字为b。最终的答案是```a*1e(L/2+1) + c*1e(L/2) + b```。 diff --git a/Math/2221.Find-Triangular-Sum-of-an-Array/2221.Find-Triangular-Sum-of-an-Array.cpp b/Math/2221.Find-Triangular-Sum-of-an-Array/2221.Find-Triangular-Sum-of-an-Array.cpp new file mode 100644 index 000000000..28de457c6 --- /dev/null +++ b/Math/2221.Find-Triangular-Sum-of-an-Array/2221.Find-Triangular-Sum-of-an-Array.cpp @@ -0,0 +1,27 @@ +using ll = long long; +class Solution { +public: + int triangularSum(vector& nums) + { + ll comb[1000][1000]; + int n = nums.size()-1; + for (int i = 0; i <= n; ++i) + { + comb[i][i] = comb[i][0] = 1; + if (i==0) continue; + for (int j = 1; j < i; ++j) + { + comb[i][j] = comb[i - 1][j - 1] + comb[i - 1][j]; + comb[i][j] %= 10; + } + } + + ll ret = 0; + for (int i=0; i<=n; i++) + { + ret += nums[i]*comb[n][i]%10; + } + + return ret%10; + } +}; diff --git a/Math/2221.Find-Triangular-Sum-of-an-Array/Readme.md b/Math/2221.Find-Triangular-Sum-of-an-Array/Readme.md new file mode 100644 index 000000000..3aebc9fda --- /dev/null +++ b/Math/2221.Find-Triangular-Sum-of-an-Array/Readme.md @@ -0,0 +1,33 @@ +### 2221.Find-Triangular-Sum-of-an-Array + +本题最直观的方法就是模拟,根据数据规模,N^2的时间复杂度是可以接受的。 + +本题其实还有另外一个切入的角度。从题目上看,三角形的构造方法与杨辉三角形非常相似,所以此题一定和二项式系数有关。 + +我们从下往上观察,最后两行是 +``` +1 1 + 1 +``` +这意味着此时最上面一行的每一个元素对于最终结果(即最底角的元素)的贡献是1:1. + +再观察最后三行 +``` +1 2 1 + 1 1 + 1 +``` +此时发现,最上面一行的每一个元素对于最终结果(即最底角的元素)的贡献恰好就是1:2:1. 
究其原因,元素(1,1)会复制给(2,1),元素(1,2)会复制给(2,1)和(2,2)造成了双倍的贡献,而元素(1,3)又会只贡献给(2,2)。也就是说,我们只需要通过第二行,就可以推出第一行里每个元素对于底角元素的贡献值。 + +再观察最后四行 +``` +1 3 3 1 + 1 2 1 + 1 1 + 1 +``` +很明显了,最上面一行的每个元素对于底角元素的贡献值比例1:3:3:1,它就是(a+b)^3的二项式系数,即C(3,0),C(3,1),C(3,2),C(3,3),其中C(x,y)表示组合数。 + +于是我们就可以得出结论,令n = nums.size()-1,那么原始数值里的nums[i],会复制C(n,i)份计入到递交元素中。我们只需要将nums按照n次二项式系数加权平均即可。 + +计算二项式系数,就是算组合数,可以参考[模板代码](https://github.com/wisdompeak/LeetCode/blob/master/Template/Math/Combination-Number.cpp) diff --git a/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart/2280.Minimum-Lines-to-Represent-a-Line-Chart.cpp b/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart/2280.Minimum-Lines-to-Represent-a-Line-Chart.cpp new file mode 100644 index 000000000..5f7c27d29 --- /dev/null +++ b/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart/2280.Minimum-Lines-to-Represent-a-Line-Chart.cpp @@ -0,0 +1,26 @@ +class Solution { +public: + int minimumLines(vector>& stockPrices) + { + if (stockPrices.size()==1) return 0; + sort(stockPrices.begin(), stockPrices.end()); + + int ret = 1; + int n = stockPrices.size(); + for (int i=2; i>& stockPrices, int t) + { + int x0 = stockPrices[t-2][0], y0 = stockPrices[t-2][1]; + int x1 = stockPrices[t-1][0], y1 = stockPrices[t-1][1]; + int x2 = stockPrices[t-0][0], y2 = stockPrices[t-0][1]; + + return (long long)(y2-y1)*(x1-x0)==(long long)(y1-y0)*(x2-x1); + } +}; diff --git a/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart/Readme.md b/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart/Readme.md new file mode 100644 index 000000000..998b5a1d6 --- /dev/null +++ b/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart/Readme.md @@ -0,0 +1,7 @@ +### 2280.Minimum-Lines-to-Represent-a-Line-Chart + +将所有点按照横坐标排序之后,本题的核心就是判断任意相邻的三个点ABC是否共线。容易想到,我们判断线段AB和BC的斜率是否相等,即```(y3-y2)/(x3-x2)==(y2-y1)/(x2-x1)```.这里可能会有两个问题,第一个就是精度,当两个小数非常接近时,浮点数很难判断准确。第二个就是当斜率是90度时,除数为0(此题中这个问题不存在)。 + +解决方案就是将除法转换为乘法,即判断```(y3-y2)*(x2-x1)==(y2-y1)*(x3-x2)```即可。 + +相同的技巧在```2152.Minimum-Number-of-Lines-to-Cover-Points```中也用到过。 diff --git a/Math/233.Number-of-Digit-One/Readme.md b/Math/233.Number-of-Digit-One/Readme.md index 27aa4842a..948dbd30d 100644 --- a/Math/233.Number-of-Digit-One/Readme.md +++ b/Math/233.Number-of-Digit-One/Readme.md @@ -16,5 +16,6 @@ c. 如果Y<1,那么最低两位如论取什么,都会导致这个数大于n 综合整理上述的分类讨论方案,可以将它适用于任何一个数位(个位、十位、千位、万位...),将这些数的统计全部加起来就是答案。 +此题和`1067. 
Digit Count in Range`非常相似。 -[Leetcode Link](https://leetcode.com/problems/number-of-digit-one) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/number-of-digit-one) diff --git a/Math/2344.Minimum-Deletions-to-Make-Array-Divisible/2344.Minimum-Deletions-to-Make-Array-Divisible.cpp b/Math/2344.Minimum-Deletions-to-Make-Array-Divisible/2344.Minimum-Deletions-to-Make-Array-Divisible.cpp new file mode 100644 index 000000000..8fa809264 --- /dev/null +++ b/Math/2344.Minimum-Deletions-to-Make-Array-Divisible/2344.Minimum-Deletions-to-Make-Array-Divisible.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int minOperations(vector& nums, vector& numsDivide) + { + int x = numsDivide[0]; + + for (int i=1; ik) return 0; + + vector>dp(k+1, vector(2*k+10, 0)); + dp[0][0+offset] = 1; + + for (int t=1; t<=k; t++) + for (int p = -k; p<=k; p++) + { + if (p-1 >= -k) + dp[t][p+offset] = (dp[t][p+offset] + dp[t-1][p-1+offset]) % M; + if (p+1 <= k) + dp[t][p+offset] = (dp[t][p+offset] + dp[t-1][p+1+offset]) % M; + } + + return dp[k][abs(endPos - startPos) + offset]; + } +}; diff --git a/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps_v2.cpp b/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps_v2.cpp new file mode 100644 index 000000000..52f9c75dc --- /dev/null +++ b/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps_v2.cpp @@ -0,0 +1,30 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + LL comb[1005][1005]; +public: + int numberOfWays(int startPos, int endPos, int k) + { + int diff = abs(endPos-startPos); + if (diff > k) return 0; + + if ((diff + k)%2==1) return 0; + + int a = (k+diff)/2; + int b = (k-diff)/2; + + return C(k,a); + } + + LL C(int m, int n) + { + if (comb[m][n]!=0) return comb[m][n]; + if (n==0) return 1; + if (m==n) return 1; + + LL ret = C(m-1, n-1) + C(m-1, n); + comb[m][n] = ret % M; + + return comb[m][n]; + } +}; diff --git a/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps/Readme.md b/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps/Readme.md new file mode 100644 index 000000000..5af454938 --- /dev/null +++ b/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps/Readme.md @@ -0,0 +1,9 @@ +### 2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps + +#### 解法1:常规DP +本题常规的解法是DP。我们令dp[t][p]表示第t步的时候移动到p的位置有多少种方案。显然我们有```dp[t][p] = dp[t-1][p-1] + dp[t1][p+1]```. + +这里我们需要注意的是startPos和endPos的绝对位置对于我们没有用处,我们只关心最终的相对移动,即`|endPos-startPos|`.我们在计算dp的循环里,p的范围应该是[-k,k]. 
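+
+以 k=2、startPos==endPos 为例做一个小验证(数值仅为示意):
+```
+dp[0][0] = 1
+dp[1][-1] = 1, dp[1][+1] = 1
+dp[2][-2] = 1, dp[2][0] = 2, dp[2][+2] = 1
+```
+即答案为 dp[2][0] = 2,对应先右后左与先左后右两种走法。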
+ +#### 解法2:组合数 +我们令`d=|endPos-startPos|`,那么我们知道,为了通过k步的移动、最终偏移d的位置,需要前进a步且后退b步,且`a+b=k`,其中a就是`(d+k)/2`,其中`d+k`必须能被2整除。这样的话,本题就是求在k步移动里如何安排其中的a步前进。顾方案数就是comb(k,a)。我们可以利用组合数的递推公式`comb[i][j] = comb[i - 1][j - 1] + comb[i - 1][j]`求解。 diff --git a/Math/2448.Minimum-Cost-to-Make-Array-Equal/2448.Minimum-Cost-to-Make-Array-Equal_v1.cpp b/Math/2448.Minimum-Cost-to-Make-Array-Equal/2448.Minimum-Cost-to-Make-Array-Equal_v1.cpp new file mode 100644 index 000000000..17616c4f0 --- /dev/null +++ b/Math/2448.Minimum-Cost-to-Make-Array-Equal/2448.Minimum-Cost-to-Make-Array-Equal_v1.cpp @@ -0,0 +1,34 @@ +using LL = long long; +class Solution { +public: + long long minCost(vector& nums, vector& cost) + { + int n = nums.size(); + vector>arr; + for (int i=0; i= totalCost*1.0/2) + { + k = i; + break; + } + } + + LL ret = 0; + for (int i=0; i& nums, vector& cost) + { + int n = nums.size(); + vector>arr; + for (int i=0; ileft(n, 0); + LL sum = arr[0].second; + for (int i=1; iright(n, 0); + sum = arr[n-1].second; + for (int i=n-2; i>=0; i--) + { + LL delta = arr[i+1].first - arr[i].first; + right[i] = right[i+1] + delta * sum; + sum += arr[i].second; + } + + vectorall(n,0); + for (int i=0; istr_arr; + istringstream in(s); + int k = 0; + for (string word; in>>word; k++) + str_arr.push_back(word); + + inv[1] = 1; + for(int i=2; i<=100000; ++i) + inv[i]=(M-M/i) * inv[M%i] % M; + + LL ret = 1; + for (auto& str: str_arr) + { + ret = ret * helper(str) % M; + } + return ret; + } + + LL helper(string& s) + { + unordered_mapMap; + for (auto ch: s) + Map[ch]+=1; + + int N = s.size(); + LL ret = 1; + for (int i=1; i<=N; i++) + ret = ret * i % M; + + for (auto [k,v]:Map) + { + for (int i=1; i<=v; i++) + ret = ret * inv[i] % M; + } + return ret; + } +}; diff --git a/Math/2514.Count-Anagrams/Readme.md b/Math/2514.Count-Anagrams/Readme.md new file mode 100644 index 000000000..a0600ff82 --- /dev/null +++ b/Math/2514.Count-Anagrams/Readme.md @@ -0,0 +1,7 @@ +### 2514.Count-Anagrams + +本题的本质就是计算每个单词的distinct permutation的乘积。 + +对于一个单词长度为n,则distinct permutation的数目就是`n! / prod{k_i !}`,其中k_i就是每个字母在该单词中出现的频次。 + +本题的难点在于模下计算。上述公式中ki的阶乘是在分母上,所以需要取逆元。即转换为 `n! * prod{inv[k_i]!}`。因为ki的频次不超过1e5,所以我们可以提前预处理,用线性时间算出1e5以内所有的inv[i]. diff --git a/Math/2539.Count-the-Number-of-Good-Subsequences/2539.Count-the-Number-of-Good-Subsequences.cpp b/Math/2539.Count-the-Number-of-Good-Subsequences/2539.Count-the-Number-of-Good-Subsequences.cpp new file mode 100644 index 000000000..05dbc519c --- /dev/null +++ b/Math/2539.Count-the-Number-of-Good-Subsequences/2539.Count-the-Number-of-Good-Subsequences.cpp @@ -0,0 +1,57 @@ +using LL = long long; +class Solution { + LL M = 1e9+7; + vector factorial; +public: + vector GetFactorial(LL N) + { + vectorrets(N+1); + rets[0] = 1; + for (int i=1; i<=N; i++) + rets[i] = rets[i-1] * i % M; + return rets; + } + + long long quickPow(long long x, long long N) { + if (N == 0) { + return 1; + } + LL y = quickPow(x, N / 2) % M; + return N % 2 == 0 ? 
(y * y % M) : (y * y % M * x % M); + } + + LL comb(LL m, LL n) + { + if (n>m) return 0; + LL a = factorial[m]; + LL b = factorial[n] * factorial[m-n] % M; + LL inv_b = quickPow(b, (M-2)); + + return a * inv_b % M; + } + + int countGoodSubsequences(string s) + { + unordered_mapMap; + for (auto ch: s) + Map[ch] += 1; + + vectorcount; + for (auto [k,v]: Map) + count.push_back(v); + + int n = *max_element(count.begin(), count.end()); + + factorial = GetFactorial(n); + + LL ret = 0; + for (int f=1; f<=n; f++) + { + LL temp = 1; + for (int c: count) + temp = temp * (comb(c, f)+1) % M; + ret = (ret + temp -1) % M; + } + return ret; + } +}; diff --git a/Math/2539.Count-the-Number-of-Good-Subsequences/Readme.md b/Math/2539.Count-the-Number-of-Good-Subsequences/Readme.md new file mode 100644 index 000000000..5b41d7238 --- /dev/null +++ b/Math/2539.Count-the-Number-of-Good-Subsequences/Readme.md @@ -0,0 +1,25 @@ +### 2539.Count-the-Number-of-Good-Subsequences + +我们遍历频次f从1到n(其中n是所有字母里最高的频次)。对于固定的频次f,我们考察26个字母中,每个字母取f个的组合数comb(c,f),以及不取的决策(即+1),这样就可以保证所取的子序列里每个字母的频次都一致。 +```cpp +for (int f=1; f<=n; f++) +{ + LL temp = 1; + for (int c: count) + temp = temp * (comb(c, f)+1) % M; + ret = (ret + temp -1) % M; +} +``` +注意,对于频次f,我们其实都包括了“全不取”的策略,这相当于空串是不合法的,所以对于每个temp我们都要减一。 + +关于计算组合数,应为要对M取模,我们可以直接用组合数定义和费马小定理来硬算,即 +```cpp +LL comb(LL m, LL n) +{ + if (n>m) return 0; + LL a = factorial[m]; + LL b = factorial[n] * factorial[m-n] % M; + LL inv_b = quickPow(b, (M-2)); + return a * inv_b % M; +} +``` diff --git a/Math/2543.Check-if-Point-Is-Reachable/2543.Check-if-Point-Is-Reachable.cpp b/Math/2543.Check-if-Point-Is-Reachable/2543.Check-if-Point-Is-Reachable.cpp new file mode 100644 index 000000000..377b52fc4 --- /dev/null +++ b/Math/2543.Check-if-Point-Is-Reachable/2543.Check-if-Point-Is-Reachable.cpp @@ -0,0 +1,9 @@ +class Solution { +public: + bool isReachable(int targetX, int targetY) + { + int g = gcd((LL)targetX, (LL)targetY); + while (g%2==0) g/=2; + return g==1; + } +}; diff --git a/Math/2543.Check-if-Point-Is-Reachable/Readme.md b/Math/2543.Check-if-Point-Is-Reachable/Readme.md new file mode 100644 index 000000000..be3bf3987 --- /dev/null +++ b/Math/2543.Check-if-Point-Is-Reachable/Readme.md @@ -0,0 +1,15 @@ +### 2543.Check-if-Point-Is-Reachable + +我们反过来思考从(x,y)走到(1,1)的操作。将原操作逆过来就是 +1. (x,y)->(x+y, y) +2. (x,y)->(x, x+y) +3. (x,y)->(x/2, y) if (x%2==0) +4. (x,y)->(x, y/2) if (y%2==0) + +如果我们不看第三和第四条规则,我们可以发现,从(x,y)走到任意的(a,b),其中a必然写作mx+ny的形式。mx+ny必然含有gcd(x,y)。再考虑第三和第四条,我们只能除以2来降低数字的大小,所以如果gcd(x,y)包含非2的因子,我们是如论如何都无法除掉的,最终也不可能达到1. + +那么如果`gcd(x,y)`不含2的次方之外的元素,那么如何证明一定能转化为`(1,1)`呢?方案如下: +1. 如果x和y任意一个是偶数,就将该数字除以2。 +2. 
如果x和y都是奇数,就转化为`(x,(x+y)/2)`或者`((x+y)/2,y)`,目的就是将数字变小。 + +以上两个操作都一定会将数字变小,除非`x=y=odd`。因为之前说过gcd(x,y)不含2的次方之外的元素,所以只有`x=y=1`是最终的归宿。 diff --git a/Math/2607.Make-K-Subarray-Sums-Equal/2607.Make-K-Subarray-Sums-Equal_v1.cpp b/Math/2607.Make-K-Subarray-Sums-Equal/2607.Make-K-Subarray-Sums-Equal_v1.cpp new file mode 100644 index 000000000..749347c81 --- /dev/null +++ b/Math/2607.Make-K-Subarray-Sums-Equal/2607.Make-K-Subarray-Sums-Equal_v1.cpp @@ -0,0 +1,34 @@ +using LL = long long; +class Solution { +public: + long long makeSubKSumEqual(vector& arr, int k) + { + int n = arr.size(); + LL ret = 0; + vectorvisited(n); + + for (int i=0; inums; + int j = i; + while (visited[j]==0) + { + visited[j] = 1; + nums.push_back(arr[j]); + j = (j+k)%n; + } + ret += helper(nums); + } + return ret; + } + + LL helper(vector&nums) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + LL ret = 0; + for (int i=0; i& arr, int k) + { + int n = arr.size(); + LL ret = 0; + vectorvisited(n); + int T = gcd(k, n); + + for (int i=0; inums; + int j = i; + while (j&nums) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + LL ret = 0; + for (int i=0; i& nums) + { + int n = nums.size(); + int g = nums[0]; + for (int i=0; i 0) + return (n - count); + + count = n; + for (int i=0; i& sick) + { + power[0] = 1; + for (int i=1; i<=n; i++) + power[i] = power[i-1]*2 % mod; + + fact[0] = 1; + for (int i=1; i<=n; i++) + fact[i] = fact[i-1]*i % mod; + + vectorgroups; + for (int i=0; i0) + ret = ret * power[x-1] % mod; + } + + return ret; + } + + LL quickPow(LL x, LL y) + { + LL ret = 1; + LL cur = x; + while (y) + { + if (y & 1) + { + ret = (LL)ret * cur % mod; + } + cur = (LL)cur * cur % mod; + y >>= 1; + } + return ret; + } + + LL inv(LL x) + { + return quickPow(x, mod - 2); + } +}; diff --git a/Math/2954.Count-the-Number-of-Infection-Sequences/Readme.md b/Math/2954.Count-the-Number-of-Infection-Sequences/Readme.md new file mode 100644 index 000000000..f0f783c25 --- /dev/null +++ b/Math/2954.Count-the-Number-of-Infection-Sequences/Readme.md @@ -0,0 +1,15 @@ +### 2954.Count-the-Number-of-Infection-Sequences + +对于两个相邻sick点之间的区间,他们被感染的次序看似很复杂,其实无非就是“感染左端点”和“感染右端点”两个选择里的随机选取。因此任意一个感染序列,都对应了一种LR的二值序列。假设区间内未被感染的点有m个,那么感染过程的序列种类(即排列)就是`2^(m-1)`。为什么是m-1?因为当只剩最后一个未感染点时,“感染左端点”和“感染右端点”这两个选择对应是同一个点。 + +此外需要注意,如果只有单边存在sick的区间(比如第一个区间或者最后一个区间),它的序列种类只有1. 
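+
+A small illustration of the per-gap count (numbers chosen purely for illustration): with n=5 and sick=[0,4], the single middle gap has m=3 uninfected children (positions 1,2,3), so it contributes 2^(3-1)=4 orderings, namely (1,2,3), (1,3,2), (3,1,2), (3,2,1); at every step except the last we pick either the leftmost or the rightmost frontier of the gap, and the final step is forced.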
+ +以上是一个区间的种类数。那么如何计算所有点的总的序列种类呢?假设前述的区间m1,m2,...mk的总数是n,那么这n个点的随机排列是n!种。但是,对于属于某个特定区间的点而言(比如说属于第k个区间的mk个点),它的顺序不应该是完全随机的,随意我们要再除以mk的阶乘抵消这种随机性。但是属于第k区间的点肯定也不是只有一种排列,而是有`2^(mk-1)`种方法(如果是单边存在sick的区间,那就只是1种),故需要再乘以该区间内点的排列数。 + +所以这道题的答案是 +``` +ret = n!; +for (int i=0; i& nums) + { + int n = nums.size(); + sort(nums.begin(), nums.end()); + int m; + if (n%2==1) + m = nums[n/2]; + else + m = (nums[n/2] + nums[n/2-1])/2; + + string a = nextSmallerOrEqual(to_string(m)); + string b = nextGreaterOrEqual(to_string(m)); + + long long ret = LLONG_MAX; + ret = min(ret, check(nums, stoll(a))); + ret = min(ret, check(nums, stoll(b))); + return ret; + } + + long long check(vector& nums, long long p) + { + long long sum = 0; + for (int i=0; i=0; i--) + { + int d = s[i] - '0' - carry; + if (d>=0) + { + s[i] = '0' + d; + carry = 0; + } + else + { + s[i] = '9'; + carry = 1; + } + s[m-1-i] = s[i]; + } + + if (s[0] == '0' && m>1) + return string(m-1, '9'); + else + return s; + } + + string nextGreaterOrEqual(string n) + { + int m = n.size(); + string s = n; + for (int i=0, j=m-1; i<=j; ) + s[j--] = s[i++]; + if (s >= n) return s; + + int carry = 1; + for (int i=(m-1)/2; i>=0; i--) + { + int d = s[i] - '0' + carry; + if (d<=9) + { + s[i] = '0' + d; + carry = 0; + } + else + { + s[i] = '0'; + carry = 1; + } + s[m-1-i] = s[i]; + } + + if (carry == 1) + { + s = string(m+1, '0'); + s[0] = s.back() = '1'; + return s; + } + else + return s; + } +}; diff --git a/Math/2967.Minimum-Cost-to-Make-Array-Equalindromic/Readme.md b/Math/2967.Minimum-Cost-to-Make-Array-Equalindromic/Readme.md new file mode 100644 index 000000000..93062ba42 --- /dev/null +++ b/Math/2967.Minimum-Cost-to-Make-Array-Equalindromic/Readme.md @@ -0,0 +1,5 @@ +### 2967.Minimum-Cost-to-Make-Array-Equalindromic + +根据中位数定理,Make Array Equal的最小代价就是将所有元素变成数组里的中位数(median)。本题中,我们的目标就是找到最接近中位数的回文数。可以借鉴`564. 
Find the Closest Palindrome`的算法,求得中位数M的next greater palindrome和next smaller palinedrome,然后选取两者较小的代价即可。 + +特别注意,单纯找nearest palinedrome是不对的。next greater palindrome和next smaller palinedrome相比,并不是更接近M就更好,而是与array里元素的分布有关。 diff --git a/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/2968.Apply-Operations-to-Maximize-Frequency-Score_v1.cpp b/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/2968.Apply-Operations-to-Maximize-Frequency-Score_v1.cpp new file mode 100644 index 000000000..70989f56b --- /dev/null +++ b/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/2968.Apply-Operations-to-Maximize-Frequency-Score_v1.cpp @@ -0,0 +1,39 @@ +using LL = long long; +class Solution { +public: + int maxFrequencyScore(vector& nums, long long k) + { + int n = nums.size(); + sort(nums.begin(), nums.end()); + + nums.insert(nums.begin(), 0); + vectorpresum(n+1); + for (int i=1; i<=n; i++) + presum[i] = presum[i-1] + nums[i]; + + int left = 1, right = n; + while (left < right) + { + int mid = right-(right-left)/2; + if (isOK(nums, presum, k, mid)) + left = mid; + else + right = mid-1; + } + return left; + } + + bool isOK(vector&nums, vectorpresum, LL k, int len) + { + int n = nums.size()-1; + for (int i=1; i+len-1<=n; i++) + { + LL m = i+len/2; + LL j = i+len-1; + LL sum1 = nums[m]*(m-i+1) - (presum[m] - presum[i-1]); + LL sum2 = (presum[j] - presum[m-1]) - nums[m]*(j-m+1); + if (sum1+sum2<=k) return true; + } + return false; + } +}; diff --git a/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/2968.Apply-Operations-to-Maximize-Frequency-Score_v2.cpp b/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/2968.Apply-Operations-to-Maximize-Frequency-Score_v2.cpp new file mode 100644 index 000000000..72a3937f3 --- /dev/null +++ b/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/2968.Apply-Operations-to-Maximize-Frequency-Score_v2.cpp @@ -0,0 +1,35 @@ +using LL = long long; +class Solution { +public: + int maxFrequencyScore(vector& nums, long long k) + { + int n = nums.size(); + sort(nums.begin(), nums.end()); + + nums.insert(nums.begin(), 0); + vectorpresum(n+1); + for (int i=1; i<=n; i++) + presum[i] = presum[i-1] + nums[i]; + + int j = 1; + int ret = 0; + for (int i=1; i<=n; i++) + { + while (j<=n && isOK(nums, presum, i, j, k)) + { + ret = max(ret, j-i+1); + j++; + } + } + return ret; + } + + bool isOK(vector&nums, vector&presum, int i, int j, LL k) + { + int m = (i+j)/2; + LL sum1 = (presum[j]-presum[m]) - (LL)nums[m]*(j-m); + LL sum2 = (LL)nums[m]*(m-i) - (presum[m-1]-presum[i-1]); + return sum1+sum2 <= k; + } + +}; diff --git a/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/Readme.md b/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/Readme.md new file mode 100644 index 000000000..f592523db --- /dev/null +++ b/Math/2968.Apply-Operations-to-Maximize-Frequency-Score/Readme.md @@ -0,0 +1,13 @@ +### 2968.Apply-Operations-to-Maximize-Frequency-Score + +#### 解法1:二分+固定滑窗 +为了通过有限的操作得到更多相等的元素,我们必然会将这些操作集中在原本已经接近的元素上。所以我们将nums排序之后,必然是选取其中的一段subarray,将其变成同一个数。显然,由中位数的性质,想将一个数组中的所有元素变成同一个元素,那么变成他们的中位数median能够使得改动之和最小。 + +我们可以二分搜索最大的subarray长度len。对于选定的len,我们在nums上走一遍固定长度的滑窗。对于每一个滑窗范围[i:j],根据median性质,我们将其变为nums[(i+j)/2]是最高效的做法。令中位数的index是m,那么我们就可以知道区间[i:j]所需要的改动就是两部分之和 `sum[m:j]-nums[m]*(j-m+1) + nums[m]*(m-i+1)-sum[i:m]`. 
其中区间和可以用前缀和数组来实现。 + +如果存在一个滑窗使得其需要的改动小于等于k,那么说明len是可行的。我们可以再尝试更大的滑窗,否则尝试更小的滑窗。 + +#### 解法2:动态滑窗 +上述的思想也可以用动态滑窗来实现。固定左边界i之后,我们可以右移右边界j,直至区间[i:j]所需要的改动大于k。此时j-i就是一个可行的区间长度。然后再移动一格左边界i,找到下一个合适的j。 + +此题类似`1838.Frequency-of-the-Most-Frequent-Element` diff --git a/Math/3086.Minimum-Moves-to-Pick-K-Ones/3086.Minimum-Moves-to-Pick-K-Ones.cpp b/Math/3086.Minimum-Moves-to-Pick-K-Ones/3086.Minimum-Moves-to-Pick-K-Ones.cpp new file mode 100644 index 000000000..b8f3c195f --- /dev/null +++ b/Math/3086.Minimum-Moves-to-Pick-K-Ones/3086.Minimum-Moves-to-Pick-K-Ones.cpp @@ -0,0 +1,54 @@ +using LL = long long; +class Solution { +public: + long long minimumMoves(vector& nums, int k, int maxChanges) + { + vectorarr; + for (int i=0; i=0) + ret = min(ret, helper(arr, k-maxChanges) + maxChanges*2); + + if (k-maxChanges+1 <= m && maxChanges-1>=0) + ret = min(ret, helper(arr, k-maxChanges+1) + (maxChanges-1)*2); + + if (k-maxChanges+2 <= m && maxChanges-2>=0) + ret = min(ret, helper(arr, k-maxChanges+2) + (maxChanges-2)*2); + + if (k-maxChanges+3 <= m && maxChanges-3>=0) + ret = min(ret, helper(arr, k-maxChanges+3) + (maxChanges-3)*2); + + return ret; + } + + LL helper(vector&arr, int k) + { + if (k==0) return 0; + + int m = arr.size(); + + LL sum = 0; + for (int i=0; i>& points) + { + vector>arr(4); + + for (auto& p: points) + { + arr[0].insert(p[0]+p[1]); + arr[1].insert(p[0]-p[1]); + arr[2].insert(-p[0]+p[1]); + arr[3].insert(-p[0]-p[1]); + } + + int ret = INT_MAX/2; + for (auto& p: points) + { + arr[0].erase(arr[0].find(p[0]+p[1])); + arr[1].erase(arr[1].find(p[0]-p[1])); + arr[2].erase(arr[2].find(-p[0]+p[1])); + arr[3].erase(arr[3].find(-p[0]-p[1])); + + int ans = 0; + ans = max(ans, *prev(arr[0].end()) - *arr[0].begin()); + ans = max(ans, *prev(arr[1].end()) - *arr[1].begin()); + ans = max(ans, *prev(arr[2].end()) - *arr[2].begin()); + ans = max(ans, *prev(arr[3].end()) - *arr[3].begin()); + + ret = min(ret, ans); + + arr[0].insert(p[0]+p[1]); + arr[1].insert(p[0]-p[1]); + arr[2].insert(-p[0]+p[1]); + arr[3].insert(-p[0]-p[1]); + } + + return ret; + } +}; diff --git a/Math/3102.Minimize-Manhattan-Distances/Readme.md b/Math/3102.Minimize-Manhattan-Distances/Readme.md new file mode 100644 index 000000000..16a0ec2b2 --- /dev/null +++ b/Math/3102.Minimize-Manhattan-Distances/Readme.md @@ -0,0 +1,7 @@ +### 3102.Minimize-Manhattan-Distances + +此题的本质就是1131.Maximum-of-Absolute-Value-Expression,求二维点集里的最大曼哈顿距离。 + +我们需要维护四个有序容器,分别盛装所有点的(x+y), (x-y), (-x+y), (-x-y)。记每个容器中的最大值减去最小值为t,那么四个t中的最大值就是二维点集里的最大曼哈顿距离。 + +根据上述原理,我们遍历所有的点,每次从容器里面将一个点去除,再求此时的最大曼哈顿距离,只需要logN的时间。这样遍历N个点之后,用NlogN的时间就可以求出最优解。 diff --git a/Math/3164.Find-the-Number-of-Good-Pairs-II/3164.Find-the-Number-of-Good-Pairs-II.cpp b/Math/3164.Find-the-Number-of-Good-Pairs-II/3164.Find-the-Number-of-Good-Pairs-II.cpp new file mode 100644 index 000000000..12f85d9e4 --- /dev/null +++ b/Math/3164.Find-the-Number-of-Good-Pairs-II/3164.Find-the-Number-of-Good-Pairs-II.cpp @@ -0,0 +1,34 @@ +class Solution { +public: + long long numberOfPairs(vector& nums1, vector& nums2, int k) + { + vectornums; + for (int x: nums1) + if (x%k==0) + nums.push_back(x/k); + + unordered_map count; + for (int num : nums2) { + count[num]++; + } + long long ret = 0; + + for (int x : nums) + { + for (int d = 1; d * d <= x; ++d) + { + if (x % d == 0) + { + if (count.find(d) != count.end()) { + ret += count[d]; + } + if (d != x / d && count.find(x / d) != count.end()) { + ret += count[x / d]; + } + } + } + } + + return ret; + } +}; diff --git 
a/Math/3164.Find-the-Number-of-Good-Pairs-II/Readme.md b/Math/3164.Find-the-Number-of-Good-Pairs-II/Readme.md new file mode 100644 index 000000000..3ba3a7b96 --- /dev/null +++ b/Math/3164.Find-the-Number-of-Good-Pairs-II/Readme.md @@ -0,0 +1,5 @@ +### 3164.Find-the-Number-of-Good-Pairs-II + +首先,我们必然将nums1里不能被k整除的去除掉。 + +接下来,我们就是要寻找有多少个pair,使得nums1的元素能被nums2里的元素整除。看上去似乎没有比o(MN)更好的方法了。但是我们可以换一个角度就豁然开朗。我们可以枚举nums1[i]的约数,只需要sqrt(V)次。对于每个约数我们只需要在hash表里查看是否存在这样的nums2元素即可。这样时间复杂度就是`N*sqrt(V)`. diff --git a/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II.cpp b/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II.cpp new file mode 100644 index 000000000..4ca3aa9ad --- /dev/null +++ b/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II.cpp @@ -0,0 +1,101 @@ +class Solution { +public: + int minimumSum(vector>& grid) + { + int ret = INT_MAX; + + int m = grid.size(), n = grid[0].size(); + + /*************************/ + 1. 2. 3. + + ┌-┐ ┌┐┌┐ ┌-┐ + └-┘ └┘└┘ └-┘ + ┌-┐ ┌-┐ ┌┐┌┐ + └-┘ └-┘ └┘└┘ + ┌-┐ + └-┘ + + 4. 5. 6. + ┌┐┌┐┌┐ ┌ ┐┌┐ ┌┐┌ ┐ + └┘└┘└┘ │ │└┘ └┘│ │ + │ │┌┐ ┌┐│ │ + └ ┘└┘ └┘└ ┘ + /*************************/ + + for (int i=1; i>& grid, int a, int b, int c, int d) + { + if (a>c || b>d) return INT_MAX/3; + int left = INT_MAX, top = INT_MAX, bottom = INT_MIN, right = INT_MIN; + for (int i=a; i<=c; i++) + for (int j=b; j<=d; j++) + { + if (grid[i][j]==0) continue; + left = min(left, j); + right = max(right, j); + top = min(top, i); + bottom = max(bottom, i); + } + if (bottom>=top && right>=left) + return (bottom-top+1)*(right-left+1); + else + return INT_MAX/3; + } +}; diff --git a/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II/Readme.md b/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II/Readme.md new file mode 100644 index 000000000..c791a7e39 --- /dev/null +++ b/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II/Readme.md @@ -0,0 +1,21 @@ +### 3197.Find-the-Minimum-Area-to-Cover-All-Ones-II + +事实上将一个矩阵分成三个互不相交的子矩形,只有如下六种形式: +``` + 1. 2. 3. + + ┌-┐ ┌┐┌┐ ┌-┐ + └-┘ └┘└┘ └-┘ + ┌-┐ ┌-┐ ┌┐┌┐ + └-┘ └-┘ └┘└┘ + ┌-┐ + └-┘ + + 4. 5. 6. 
+ ┌┐┌┐┌┐ ┌ ┐┌┐ ┌┐┌ ┐ + └┘└┘└┘ │ │└┘ └┘│ │ + │ │┌┐ ┌┐│ │ + └ ┘└┘ └┘└ ┘ +``` +对于每种形式,只有两条分割线。我们可以用o(MN)的时间遍历分割线的位置,就可以确定三个子矩阵的边界。对于每一个子矩阵,我们再遍历其中的元素,确定包含所有元素1的最小矩阵即可(同3135)。 + diff --git a/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I/3395.Subsequences-with-a-Unique-Middle-Mode-I.cpp b/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I/3395.Subsequences-with-a-Unique-Middle-Mode-I.cpp new file mode 100644 index 000000000..782dafd92 --- /dev/null +++ b/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I/3395.Subsequences-with-a-Unique-Middle-Mode-I.cpp @@ -0,0 +1,69 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { + long long comb[1005][6]; +public: + LL getComb(int m, int n) + { + if (m& nums) + { + int n = nums.size(); + for (int i = 0; i <= n; ++i) + { + comb[i][0] = 1; + if (i==0) continue; + for (int j = 1; j <= 5; ++j) + { + comb[i][j] = comb[i - 1][j - 1] + comb[i - 1][j]; + comb[i][j] %= M; + } + } + unordered_setSet(nums.begin(), nums.end()); + + LL ret = 0; + + unordered_mapleft; + unordered_mapright; + for (int x: nums) right[x]++; + + for (int i=0; i=1) left[nums[i-1]]++; + + ret += getComb(i - left[a], 2) * getComb(n-i-1-right[a], 2) %M; + ret %= M; + + for (int b: Set) + { + if (a==b) continue; + ret += getComb(left[b],2) * getComb(right[a], 1) * getComb(n-i-1-right[a]-right[b], 1) %M; + ret += getComb(right[b],2) * getComb(left[a], 1) * getComb(i-left[a]-left[b], 1) %M; + ret %= M; + } + + for (int b: Set) + { + if (a==b) continue; + ret += getComb(left[b],1) * getComb(i-left[b]-left[a], 1) * getComb(right[a], 1) * getComb(right[b], 1) %M; + ret += getComb(right[b],1) * getComb(n-i-1-right[b]-right[a], 1) * getComb(left[a], 1) * getComb(left[b], 1) %M; + ret %= M; + } + + for (int b: Set) + { + if (a==b) continue; + ret += getComb(left[b],2) * getComb(right[a], 1) * getComb(right[b], 1) %M; + ret += getComb(right[b],2) * getComb(left[a], 1) * getComb(left[b], 1) %M; + ret %= M; + } + } + + return (getComb(n, 5) - ret + M) % M; + } +}; diff --git a/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I/Readme.md b/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I/Readme.md new file mode 100644 index 000000000..62ea183c9 --- /dev/null +++ b/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I/Readme.md @@ -0,0 +1,37 @@ +### 3395.Subsequences-with-a-Unique-Middle-Mode-I + +所有任取5个元素的子序列有comb(n,5)个。我们枚举nums[i]=a为中间元素,考虑计算不符合条件的子序列的个数。 + +如果a出现了3次或以上,那么该序列必然是符合条件的,不用考虑。 + +如果a只出现了1次,那么该序列必然不符合条件。这样的子序列有多少个呢?只需要在左区间里任选两个非a的元素,在右区间里任选两个非a的元素。故可构造这样的子序列的个数是`comb(i-left[a],2) * comb(n-i-1-right[a], 2)`。其中left表示统计i左边的元素的频次hash,right表示统计i右边的元素的频次hash。 + +如果a出现了2次,那么必然有一种元素b出现了两次或三次,才能是不符合条件的子序列。我们可以枚举b,并分为三种情况: +1. b出现了两次,且两次都出现在同一侧(假设是左边),那么右边必须要出现另一个a,以及一个非a也非b的元素(假设是c)。故写为`b b a a c`的类型。这样的概率是 +``` +comb(left[a],2) * (right[a],1) * (n-i-1-right[a]-right[b], 1) +``` +类似的,如果两个b都在另一侧,只需将上面的left和right相反即可。 +``` +comb(right[a],2) * (left[a],1) * (i-left[a]-left[b], 1) +``` + +2. b出现了两次,且出现在两侧,那么nums[i]的左右两边分别需要再出现另一个a,以及一个非a也非b的元素(假设是c)。故写为`b a a b c`的类型。这样的概率是 +``` +comb(left[a],1) * (left[b],1) * right([b],1) * (n-i-1-right[a]-right[b], 1) +``` +或者反过来 +``` +comb(right[a],1) * (right[b],1) * left([b],1) * (i-left[a]-left[b], 1) +``` + +3. b出现了三次,占据了除两个a之外的全部位置。故写为`b b a a b`的类型。这样的概率是 +``` +comb(left[b],2) * (right[b],1) * right([a],1) +``` +或者反过来 +``` +comb(right[b],2) * (left[b],1) * left([a],1) +``` + +上述算法用了两重循环枚举a和b。另外,因为n只有1000,我们可以用n^2时间(或者只需要n*5)提前计算好所有1000以内的组合数。所以总的时间复杂度是n^2. 
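+
+A minimal sketch of that precomputation, assuming n is the array length and only columns 0..5 are ever queried (so the table is (n+1)×6 rather than (n+1)×(n+1)):
+```cpp
+long long M = 1e9 + 7;
+vector<array<long long, 6>> comb(n + 1);       // value-initialized, i.e. all zeros
+for (int i = 0; i <= n; i++)
+{
+    comb[i][0] = 1;
+    for (int j = 1; j <= min(i, 5); j++)
+        comb[i][j] = (comb[i - 1][j - 1] + comb[i - 1][j]) % M;   // Pascal's rule; comb[i-1][j] is 0 when j > i-1
+}
+```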
diff --git a/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements.cpp b/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements.cpp new file mode 100644 index 000000000..3cf1a6303 --- /dev/null +++ b/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements.cpp @@ -0,0 +1,38 @@ +using LL = long long; +class Solution { +public: + const LL MOD = 1e9 + 7; + vector factorial; + vector GetFactorial(LL N) + { + vectorrets(N+1); + rets[0] = 1; + for (int i=1; i<=N; i++) + rets[i] = rets[i-1] * i % MOD; + return rets; + } + + long long quickPow(long long x, long long N) { + if (N == 0) { + return 1; + } + LL y = quickPow(x, N / 2) % MOD; + return N % 2 == 0 ? (y * y % MOD) : (y * y % MOD * x % MOD); + } + + LL comb(LL m, LL n) + { + if (n>m) return 0; + LL a = factorial[m]; + LL b = factorial[n] * factorial[m-n] % MOD; + LL inv_b = quickPow(b, (MOD-2)); + + return a * inv_b % MOD; + } + + int countGoodArrays(int n, int m, int k) + { + factorial = GetFactorial(n); + return comb(n-1,k) * m % MOD * quickPow(m-1, n-k-1) % MOD; + } +}; diff --git a/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements/Readme.md b/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements/Readme.md new file mode 100644 index 000000000..037aa8903 --- /dev/null +++ b/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements/Readme.md @@ -0,0 +1,11 @@ +### 3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements + +考虑到恰有k个位置的元素与其左边元素相同,那么将其与其左边元素“合并”后,数组里可看做只有n-k个元素,并且这些元素在相邻的位置上不重复。证明很显然,如果“合并”后依然存在相邻的相同元素,那么原数组里必然不止k处相邻的相同元素。 + +从原数组里挑出k个位置,有comb(n,k)种方案。 + +任何一种上述的方案,对于“合并”后的数组的n-k个元素,要求相邻之间不重复,有多少种方案?第一个位置有m种选择,之后每一个位置都只有m-1种选择。故总共有`m*m^(n-k-1)`种方案。 + +所以最终答案就是简单的数学表达式 `comb(n,k)*m*m^(n-k-1)`. + +因为n和k是1e5,所以不能用o(n*k)的复杂度计算组合数任何n以内的组合数。我们可以直接硬算`comb(n,k) = n!/k!/(n-k)!`. 其中阶乘的复杂度就是o(n),但是涉及到了除法,故需要介入逆元。逆元的计算公式是`inv_x = quickPow(x, (MOD-2))`. 
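+
+A tiny worked instance of the closed formula (illustrative values): n=3, m=2, k=1 gives `comb(n-1,k)*m*(m-1)^(n-k-1) = comb(2,1)*2*1^1 = 4`; the four arrays are [1,1,2], [1,2,2], [2,1,1], [2,2,1], each with exactly one pair of equal adjacent elements.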
diff --git a/Math/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences.cpp b/Math/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences.cpp new file mode 100644 index 000000000..992e3202f --- /dev/null +++ b/Math/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences.cpp @@ -0,0 +1,38 @@ +using LL = long long; +class Solution { + LL MOD = 1e9 + 7; + LL comb[100005][75]; +public: + int minMaxSums(vector& nums, int k) + { + int n = nums.size(); + for (int i = 0; i <= n; ++i) + { + comb[i][0] = 1; + if (i==0) continue; + for (int j = 1; j <= k; ++j) + { + comb[i][j] = comb[i - 1][j - 1] + comb[i - 1][j]; + comb[i][j] %= MOD; + } + } + + sort(nums.begin(), nums.end()); + + LL ret = 0; + for (int i=0; i& nums) { + + int total = accumulate(nums.begin(), nums.end(), 0); + + LL ret = 1; + for (int i=0; i<26; i++) + { + if (nums[i]!=0) + { + ret *= getComb(total, nums[i]); + if (ret >= INT_MAX/2) return INT_MAX/2; + total -= nums[i]; + } + } + return ret; + } + + string smallestPalindrome(string s, int k) + { + int n = s.size(); + vectornums(26); + for (int i=0; i&nums, string& ret, LL& curCount) + { + int total = accumulate(nums.begin(), nums.end(), 0); + if (total == 0) { + curCount+=1; + return; + } + + for (int i=0; i<26; i++) + { + if (nums[i]==0) continue; + nums[i]-=1; + LL temp = countPermutations(nums); + + if (curCount + temp < k) + { + curCount += temp; + nums[i]++; + } + else + { + ret.push_back('a'+i); + dfs(k, nums, ret, curCount); + break; + } + } + } +}; diff --git a/Math/3518.Smallest-Palindromic-Rearrangement-II/Readme.md b/Math/3518.Smallest-Palindromic-Rearrangement-II/Readme.md new file mode 100644 index 000000000..6a95c3eb1 --- /dev/null +++ b/Math/3518.Smallest-Palindromic-Rearrangement-II/Readme.md @@ -0,0 +1,35 @@ +### 3518.Smallest-Palindromic-Rearrangement-II + +很明显本题的核心就是,我们取原字符串前一半的字母,问这些字母所能组成的从小到大的第k个字典序的排列是什么。因为有重复的字符,这里要求重复的排列只算一种。 + +关于n个字母的排列,求第k大的字典序,是一个比较常规的题目了。我们只需要从按头到尾的顺序、把候选字母从小到大地进行尝试即可。比如说,我们尝试第一个位置填写a,并且计算出后面n-1个位置如果有m种排列,那么就可以根据m和k的大小关系进行决策:如果m=k,那么我们就能确定第一个位置必然是a,此时进行递归处理第二个位置即可。 + +由此,我们需要一个函数`countPermutations(vector&nums)`,表示给定一系列字母及其个数的情况下,有多少种不同的排列。其中nums是一个长度为26的数组,记录每种字符的个数。假设总共的字符个数是n。此时最容易想到的算法就是 `count = n!/(t0!)*(t1!)...*(t25!)`,其中ti表示每种字符的个数。因为n达到了1e4,其阶乘是天文数字,必然会溢出;并且这涉及到了大数的除法,我们无法保证精度。此时就应该想到第二种算法,就是 `count = C(n,t0)*C(n-t0,t1)*C(n-t0-t1,t2)*...`这里就规避了除法的精度问题,但是溢出问题依然得不到解决。 + +此时注意到k不超过1e6。一旦count的计算需要涉及到超大的阶乘数,那么必然会远远超过k。当这种情况发生时,我们其实并不需要知道count的精确数值,因为根据之前的说法,我们对字母选择的决策其实是很明显的了。因此我们在计算count甚至在计算组合数本身时,如果能预期到它很大,我们直接就返回INT_MAX/2之类的超大数来影响决策即可,而不需要计算它实际的数值。 + +此外,本题对于空间的利用需要达到极致。我们知道,可以用n^2的时间和空间来提前处理Comb(n,x)级别的组合数。通常我们只能开到comb[1000][1000]的规模。但是利用到`C(m,n)=C(m,m-n)`的性质,我们可以最大开辟数组空间达到comb[5001][2501]。代码如下: +```cpp + int comb[5001][2501]; + int getComb(int m, int n) + { + if (m& nums) + { + sort(nums.begin(),nums.end()); + int median=nums[nums.size()/2]; + int result=0; + for (int i=0; i&a, pair&b) + { + if (a.first!=b.first) + return a.first b.second; + } public: - vector merge(vector& intervals) + vector> merge(vector>& intervals) { - vector>q; - for (int i=0; i>diff; + for (auto& interval: intervals) { - q.push_back({intervals[i].start,-1}); - q.push_back({intervals[i].end,1}); + diff.push_back({interval[0], 1}); + diff.push_back({interval[1], -1}); } + + sort(diff.begin(), diff.end(), cmp); - sort(q.begin(),q.end()); - - int count=0; - 
int start, end; - - vectorresults; - - for (int i=0; i>rets; + int start = 0, end = 0; + int sum = 0; + for (int i=0; i 0) + { + start = diff[i].first; + } + else if (sum > 0 && sum + diff[i].second == 0) { - end = q[i].first; - results.push_back({start,end}); + end = diff[i].first; + rets.push_back({start,end}); } + sum += diff[i].second; } - - return results; + + return rets; } }; diff --git a/Others/056.Merge-Intervals/Readme.md b/Others/056.Merge-Intervals/Readme.md index 90e3bb855..58b28a487 100644 --- a/Others/056.Merge-Intervals/Readme.md +++ b/Others/056.Merge-Intervals/Readme.md @@ -1,8 +1,8 @@ ### 056.Merge-Intervals -和252类似的解题手法. +对于区间合并的题目,一般都会采用和252类似的“扫描线”算法。对于每一个区间[a,b],我们在a时刻记录+1,在b时刻记录-1. 然后我们再在时间轴上顺次遍历每一个时间点,统计这些+1和-1的总和。我们会发现当sum从0变为正数时,意味着一个merged interval的开始;当sum从正数变成0时,意味着一个merged interval的结束。这样就巧妙地把所有存在overlap的区间都合并到了一起。 -需要注意的是,此题中的有效区间长度可以为0,即[t,t]也是合法的,所以在数组q中,我们除了按时间排序之外,第二指标应该按照先1后-1的次序.即如果遇到相同的时刻,{start,1}要比{end,-1}先进行处理,这样就能顺利地包容[t,t]这样的区间. +需要注意的是,对于相同的时刻,如果同时存在多个+1或者-1,应该先处理+1后处理-1。比如[a,b]和[b,c]两个区间,在处理b时刻时,按照先+1再-1的顺序,就不会出现sum=0的情况了,也就避免了merged interval在b处断开。 -[Leetcode Link](https://leetcode.com/problems/merge-intervals) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/merge-intervals) diff --git a/Others/1067.Digit-Count-in-Range/1067.Digit-Count-in-Range.cpp b/Others/1067.Digit-Count-in-Range/1067.Digit-Count-in-Range.cpp index afdb5568a..12faf5681 100644 --- a/Others/1067.Digit-Count-in-Range/1067.Digit-Count-in-Range.cpp +++ b/Others/1067.Digit-Count-in-Range/1067.Digit-Count-in-Range.cpp @@ -5,23 +5,24 @@ class Solution { return helper(d, high)-helper(d,low-1); } - int helper(int digit, int x) + int helper(int d, int n) { - int len = to_string(x).size(); + string s = to_string(n); + int len = s.size(); int count = 0; - if (digit!=0) + if (d!=0) { for (int i=1; i<=len; i++) { int divisor = pow(10,i); - count += x/divisor * pow(10,i-1); + count += n/divisor * pow(10,i-1); - int y = (x - x/divisor*divisor)/pow(10,i-1); - if (y > digit) + int y = s[len-i]-'0'; + if (y > d) count += pow(10,i-1); - else if (y==digit) - count += (x%(int)(pow(10,i-1)))+1; + else if (y==d) + count += n%(int)(pow(10,i-1)) + 1; } } else @@ -29,13 +30,13 @@ class Solution { for (int i=1; i digit) + int y = s[len-i]-'0'; + if (y > d) count += pow(10,i-1); - else if (y==digit) - count += (x%(int)(pow(10,i-1)))+1; + else if (y==d) + count += n%(int)(pow(10,i-1)) + 1; } } diff --git a/Others/1067.Digit-Count-in-Range/Readme.md b/Others/1067.Digit-Count-in-Range/Readme.md index 3e8913225..d9c523707 100644 --- a/Others/1067.Digit-Count-in-Range/Readme.md +++ b/Others/1067.Digit-Count-in-Range/Readme.md @@ -1,8 +1,8 @@ ### 1067.Digit-Count-in-Range -如果digit不是0的话,代码完全类似于 233. Number of Digit One +如果digit不是0的话,代码完全类似于`233. Number of Digit One`,用一个helper函数计算[1,n]的数字d的出现次数,然后返回`helper(high)-helper(low-1)`. 
-基本思想就是逐个位数的考虑,计算出现digit的次数。比如说num=2452,digit=4 +233的基本思想就是逐个位数的考虑,计算出现digit的次数。比如说num=2452,digit=4 考虑个位时,就是计算出现XXX1的次数。如果XXX是从000到234,显然都是必然可行的。如果XXX是235的话,因为个位数是2小于4,所以不可以。因此count+=235*1; @@ -19,4 +19,4 @@ 如果digit是0的话,唯一的区别在于第一条。d前面的数字只能从1取至(XX-1),这样的话YY可以取任何数字(00-99),所以出现d的次数是 ```(XX-1)*pow(10,i-1)```.至于XX为什么不能取00呢?因为XX如果取00,接下来的第i位也是0,那么这些都是leading zeros,计算第i位的0出现的次数是没有意义的。 -[Leetcode Link](https://leetcode.com/problems/digit-count-in-range) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/digit-count-in-range) diff --git a/Others/1224.Maximum-Equal-Frequency/1224.Maximum-Equal-Frequency.cpp b/Others/1224.Maximum-Equal-Frequency/1224.Maximum-Equal-Frequency.cpp new file mode 100644 index 000000000..0e838c8ff --- /dev/null +++ b/Others/1224.Maximum-Equal-Frequency/1224.Maximum-Equal-Frequency.cpp @@ -0,0 +1,49 @@ +class Solution { +public: + int maxEqualFreq(vector& nums) + { + unordered_mapnum2freq; + for (auto num: nums) + num2freq[num]++; + + unordered_mapfreq2count; + for (auto [num, freq]:num2freq) + freq2count[freq] += 1; + + + for (int i=nums.size()-1; i>=0; i--) + { + if (freq2count.size()==1) + { + auto [freq, count] = *freq2count.begin(); + if (count == 1 || freq == 1) + return i+1; } + else if (freq2count.size()==2) + { + vector>temp(freq2count.begin(), freq2count.end()); + sort(temp.begin(), temp.end()); + + if (temp[1].first == temp[0].first + 1 && temp[1].second == 1) + return i+1; + if (temp[0].first == 1 && temp[0].second == 1) + return i+1; + } + + int x = nums[i]; + int f = num2freq[x]; + + num2freq[x] -= 1; + if (num2freq[x]==0) + num2freq.erase(x); + + freq2count[f] -= 1; + if (freq2count[f] == 0) + freq2count.erase(f); + + if (f-1>0) + freq2count[f-1] += 1; + } + + return 2; + } +}; diff --git a/Others/1224.Maximum-Equal-Frequency/Readme.md b/Others/1224.Maximum-Equal-Frequency/Readme.md new file mode 100644 index 000000000..5567411c7 --- /dev/null +++ b/Others/1224.Maximum-Equal-Frequency/Readme.md @@ -0,0 +1,13 @@ +### 1224.Maximum-Equal-Frequency + +很显然,我们会从最长的前缀(即整个数组)开始,从后往前逐一去除元素。如果某一个前缀,满足去除一个元素就可以使得剩余元素的频次相等的话,就输出该前缀长度。 + +为此,我们可以维护一个频次表记录`freq2count[f] = count`,表示当前频次为f的元素有count个。接下来分情况讨论。 + +如果freq2count里有三种或以上的频次(即三种或以上的key),那么显然不可能通过只删减一个元素就达到频次的种类降为1. + +如果freq2count里只有两种频次,有两种情况是可以实现的。1. 类似于`3,3,3,4,4,4,4`,通过删掉一个较高频次的元素,使得剩下的两种元素频次相同。2. 类似于`1,2,2,2`,较低频次的元素只出现了一次,那么删掉它就只剩下一种元素。 + +如果freq2count里只有一种频次,也有两种情况是可以实现的。1. 类似于`2,3,4`,即freq=1,那么随便哪个元素都可以。2. 类似于`3,3,3`,即count=1,那么随便哪个元素也可以。 + +根本题类似的还有`2423. Remove Letter To Equalize Frequency`. 
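+
+A concrete illustration (input chosen for illustration): nums = [2,2,1,1,5,3,3,5]. For the prefix of length 7, i.e. [2,2,1,1,5,3,3], the element frequencies are {2:2, 1:2, 5:1, 3:2}, so freq2count = {2:3, 1:1}: the lower frequency is 1 and it occurs exactly once, which is the second two-frequency case described above. Deleting that single 5 leaves every remaining element with frequency 2, so the answer is 7.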
diff --git a/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank.cpp b/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank.cpp new file mode 100644 index 000000000..8f2f9dd12 --- /dev/null +++ b/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank.cpp @@ -0,0 +1,9 @@ +class Solution { +public: + int getLastMoment(int n, vector& left, vector& right) + { + sort(left.begin(), left.end()); + sort(right.begin(), right.end()); + return max(left.size()==0?0:left.back(), right.size()==0?0:n-right[0]); + } +}; diff --git a/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank/Readme.md b/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank/Readme.md new file mode 100644 index 000000000..57a956b02 --- /dev/null +++ b/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank/Readme.md @@ -0,0 +1,3 @@ +### 1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank + +很明显,任何碰撞事件都不影响宏观上蚂蚁的运动状态(只不过身份调换一下)。所以最后一个从左边掉落的蚂蚁,一定对应着初始时最靠右的、向左运动的蚂蚁。反之,最后一个从右边掉落的蚂蚁,一定对应着初始时最靠左的、向右运动的蚂蚁。 diff --git a/Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_Greedy.cpp b/Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_Greedy.cpp similarity index 100% rename from Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_Greedy.cpp rename to Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_Greedy.cpp diff --git a/Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_SegmentTree.cpp b/Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_SegmentTree.cpp similarity index 100% rename from Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_SegmentTree.cpp rename to Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array_SegmentTree.cpp diff --git a/Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/Readme.md b/Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/Readme.md similarity index 100% rename from Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/Readme.md rename to Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array/Readme.md diff --git a/Others/1826.Faulty-Sensor/1826.Faulty-Sensor.cpp b/Others/1826.Faulty-Sensor/1826.Faulty-Sensor.cpp index 508981d5f..b19b7020a 100644 --- a/Others/1826.Faulty-Sensor/1826.Faulty-Sensor.cpp +++ b/Others/1826.Faulty-Sensor/1826.Faulty-Sensor.cpp @@ -11,16 +11,29 @@ class Solution { } if (i>=n-1) return -1; - int flag = 1; + int flag1 = 1; for (int j=i; j>& ranges, int left, int right) + { + vectordiff(52); + for (auto range:ranges) + { + diff[range[0]]+=1; + diff[range[1]+1]+=-1; + } + + int sum = 0; + for (int i=1; i<=50; i++) + { + sum += diff[i]; + if 
(i>=left && i<=right && sum==0) + return false; + } + return true; + } +}; diff --git a/Others/1893.Check-if-All-the-Integers-in-a-Range-Are-Covered/Readme.md b/Others/1893.Check-if-All-the-Integers-in-a-Range-Are-Covered/Readme.md new file mode 100644 index 000000000..c797b73f7 --- /dev/null +++ b/Others/1893.Check-if-All-the-Integers-in-a-Range-Are-Covered/Readme.md @@ -0,0 +1,5 @@ +### 1893.Check-if-All-the-Integers-in-a-Range-Are-Covered + +本题的数据范围非常小,每个数字的数值只在[1,50]之间,因为我们在数轴的[1,50]范围内用差分数组/扫描线来做。对于任何区间[a,b]的两个端点,我们标记差分信息:即在数轴上的a位置标记+1,在b+1位置标记-1,这样做积分的时候,就相当于只在区间[a,b]被抬升了1. + +最终我们只要考察积分曲线在[left,right]是否有任意一点的值为0.是的话返回false,否则返回true。 diff --git a/Others/1906.Minimum-Absolute-Difference-Queries/Readme.md b/Others/1906.Minimum-Absolute-Difference-Queries/Readme.md index 6d98c38bc..77bbb4c8e 100644 --- a/Others/1906.Minimum-Absolute-Difference-Queries/Readme.md +++ b/Others/1906.Minimum-Absolute-Difference-Queries/Readme.md @@ -1,5 +1,5 @@ ### 1906.Minimum-Absolute-Difference-Queries -本题的突破口在于nums的数值大小只有100. 意味着我们可以遍历这个数值。也就是说,对于一个nums的一个子区间,我们可以试图遍历它包含了多少个1,多少个2,多少个3,直至多少个100. 那么如果计算一个区间内某个元素的频次呢?可以用线段树,但是前缀和显然更简单。 +本题正面硬刚是很难做的。突破口在于nums的数值大小只有100. 意味着我们可以遍历这个数值。也就是说,对于一个nums的一个子区间,我们可以试图遍历它包含了多少个1,多少个2,多少个3,直至多少个100. 那么如果计算一个区间内某个元素的频次呢?可以用线段树,但是前缀和显然更简单。 -我们提前预处理得到persum[k][i],表示nums[0:i]这个前缀数组里包含了多少个数值k。显然,对于[left,right]的query,里面含有k的数目就是presum[k][right]-presum[k][left-1]. 我们将[left,right]内包含的所有数值(即presum之差大于零)排个序,找邻接最小的gap。 +我们提前预处理得到presum[k][i],表示nums[0:i]这个前缀数组里包含了多少个数值k。显然,对于[left,right]的query,里面含有k的数目就是presum[k][right]-presum[k][left-1]. 我们将[left,right]内包含的所有数值(即presum之差大于零)排个序,找邻接最小的gap。 diff --git a/Others/2015.Average-Height-of-Buildings-in-Each-Segment/2015.Average-Height-of-Buildings-in-Each-Segment.cpp b/Others/2015.Average-Height-of-Buildings-in-Each-Segment/2015.Average-Height-of-Buildings-in-Each-Segment.cpp index 28700cdd3..a0c551970 100644 --- a/Others/2015.Average-Height-of-Buildings-in-Each-Segment/2015.Average-Height-of-Buildings-in-Each-Segment.cpp +++ b/Others/2015.Average-Height-of-Buildings-in-Each-Segment/2015.Average-Height-of-Buildings-in-Each-Segment.cpp @@ -2,54 +2,38 @@ class Solution { public: vector> averageHeightOfBuildings(vector>& buildings) { - vector>p; + map>Map; // pos -> {diffHeight, diffCount} for (auto build: buildings) { - int start = build[0], end = build[1], height = build[2]; - p.push_back({start, height}); - p.push_back({end, -height}); + int s = build[0], e = build[1], h = build[2]; + Map[s].first += h; + Map[s].second += 1; + Map[e].first -= h; + Map[e].second -= 1; } - sort(p.begin(), p.end()); - int count = 0; - int sum = 0; - - vector>temp; - for (int i=0; i>seg; + int totalHeight = 0, totalCount = 0; + for (auto& [pos, kv]: Map) { - int j = i; - while (j>rets; - for (int i=0; i; class Solution { public: vector amountPainted(vector>& paint) { - vectorarr; + map>>Map; // pos->{idx, flag} for (int i=0; i>>>array(Map.begin(), Map.end()); setSet; int n = paint.size(); - vectorrets(n); - for (int i=0; irets(n); + for (int i=0; i=0; i--) + { + if (flag == 0 && (directions[i]=='L' || directions[i]=='S')) + flag = 1; + if (flag == 1 && directions[i]=='R') + count++; + } + return count; + } +}; diff --git a/Others/2211.Count-Collisions-on-a-Road/Readme.md b/Others/2211.Count-Collisions-on-a-Road/Readme.md new file mode 100644 index 000000000..6f5965d4f --- /dev/null +++ b/Others/2211.Count-Collisions-on-a-Road/Readme.md @@ -0,0 +1,3 @@ +### 2211.Count-Collisions-on-a-Road + 
+很显然,只要左边缘有一个静止或者向右运动的车辆,那么它右边任何向左运动的车辆注定都会相撞。同理,只要右边缘有一个静止或者向左运动的车辆,那么它做边任何向右运动的车辆注定都会相撞。 diff --git a/Others/2237.Count-Positions-on-Street-With-Required-Brightness/2237.Count-Positions-on-Street-With-Required-Brightness.cpp b/Others/2237.Count-Positions-on-Street-With-Required-Brightness/2237.Count-Positions-on-Street-With-Required-Brightness.cpp new file mode 100644 index 000000000..ec7423263 --- /dev/null +++ b/Others/2237.Count-Positions-on-Street-With-Required-Brightness/2237.Count-Positions-on-Street-With-Required-Brightness.cpp @@ -0,0 +1,31 @@ +class Solution { +public: + int meetRequirement(int n, vector>& lights, vector& requirement) + { + vectordiff(n+1); + for (int i=0; ibright(n); + int sum = 0; + for (int i=0; i=requirement[i]) + ret++; + } + return ret; + } +}; diff --git a/Others/2237.Count-Positions-on-Street-With-Required-Brightness/Readme.md b/Others/2237.Count-Positions-on-Street-With-Required-Brightness/Readme.md new file mode 100644 index 000000000..456084618 --- /dev/null +++ b/Others/2237.Count-Positions-on-Street-With-Required-Brightness/Readme.md @@ -0,0 +1,3 @@ +### 2237.Count-Positions-on-Street-With-Required-Brightness + +扫描线算法的模板题。假设某盏灯的覆盖范围是[a,b],那么我们就设置差分数组diff[a]+=1和diff[b+1]-=1. 注意因为在b位置处我们是希望计入被灯光覆盖,所以-1的差分应该写在b+1这个地方。 diff --git a/Others/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path.cpp b/Others/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path.cpp new file mode 100644 index 000000000..aec2ac4d6 --- /dev/null +++ b/Others/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path.cpp @@ -0,0 +1,77 @@ +class Solution { +public: + int maxTrailingZeros(vector>& grid) + { + int m = grid.size(); + int n = grid[0].size(); + + vector>grid2(m, vector(n)); + vector>grid5(m, vector(n)); + + for (int i=0; i>left2(m, vector(n)); + vector>right2(m, vector(n)); + vector>up2(m, vector(n)); + vector>down2(m, vector(n)); + vector>left5(m, vector(n)); + vector>right5(m, vector(n)); + vector>up5(m, vector(n)); + vector>down5(m, vector(n)); + + for (int i=0; i=0; j--) + { + right2[i][j] = (j==n-1?0:right2[i][j+1]) + grid2[i][j]; + right5[i][j] = (j==n-1?0:right5[i][j+1]) + grid5[i][j]; + } + } + + for (int j=0; j=0; i--) + { + down2[i][j] = (i==m-1?0:down2[i+1][j]) + grid2[i][j]; + down5[i][j] = (i==m-1?0:down5[i+1][j]) + grid5[i][j]; + } + } + + int ret = 0; + for (int i=0; i fullBloomFlowers(vector>& flowers, vector& persons) + { + unordered_mapMap; + for (auto x: flowers) + { + Map[x[0]]+=1; + Map[x[1]+1]-=1; + } + vector>diff(Map.begin(), Map.end()); + sort(diff.begin(), diff.end()); + + vector>p; + for (int i=0; irets(persons.size()); + for (int i=0; i& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + + vectorpresum(n+2, 0); + for (int i=1; i<=n; i++) + presum[i] = (presum[i-1]+(LL)nums[i]) % M; + + vectorpresum2(n+2, 0); + for (int i=1; i<=n; i++) + presum2[i] = (presum2[i-1]+(LL)nums[i]*i) % M; + + stackStack; + vectornextSmaller(n+2,n+1); + vectorprevSmaller(n+2,0); + for (int i=1; i<=n; i++) + { + while (!Stack.empty() && nums[Stack.top()]>nums[i]) + { + nextSmaller[Stack.top()] = i; + Stack.pop(); + } + if (!Stack.empty()) + prevSmaller[i] = Stack.top(); + Stack.push(i); + } + + LL ret = 0; + for (int i=1; i<=n; i++) + { + LL a = prevSmaller[i], b = nextSmaller[i]; + LL x = i-a, y = b-i; + LL first = ((presum2[i-1] - presum2[a]) - (presum[i-1] - presum[a]) * a %M + M) % M; + first = first * y % M; + LL second = 
((presum[b-1] - presum[i]) * (b-1+1) - (presum2[b-1] - presum2[i]) + M ) % M; + second = second * x % M; + LL mid = (LL)nums[i] * x * y % M; + + ret = (ret +(first + second + mid) * nums[i]) % M; + } + + return ret; + } +}; diff --git a/Others/2281.Sum-of-Total-Strength-of-Wizards/Readme.md b/Others/2281.Sum-of-Total-Strength-of-Wizards/Readme.md new file mode 100644 index 000000000..58991c6e0 --- /dev/null +++ b/Others/2281.Sum-of-Total-Strength-of-Wizards/Readme.md @@ -0,0 +1,31 @@ +### 2281.Sum-of-Total-Strength-of-Wizards + +根据套路,我们不会去枚举所有的subarray再找其中的weakest。相反,我们遍历每个元素将其作为weakest,再找对应的subarray。 + +假设位置在i的元素nums[i],其prevSmaller在位置a,nextSmaller在位置b。那么以nums[i]为weakest的subarray,左边界可以在a与i之间任意间隙,记做有```x = i-a```种可能;右边界可以在i与b之间的任意间隙,记做有```y = b-i```种可能。 +``` +a X X X X i X X X b +``` +也就是说,共有```x*y```种subarray符合条件。我们需要累加所有这些subarray的元素和。然后再乘以nums[i]本身,加入最终答案。 + +那么"累加所有这些subarray元素和"呢?对于上面的例子,无论subarray的右边界在哪里,nums[a+1]只会当左边界在a/a+1之间时被计入,即被统计了一次。同理,nums[a+2]会当左边界在位于a/a+1之间,或者a+1/a+2之间时被计入,即被统计了两次。依次类推,i左边的四个元素被计入的次数是: +``` +a X X X X i X X X b + 1 2 3 4 +``` +所以他们对"累加所有这些subarray元素和"的贡献就是:```M = S * y```,其中``` S = nums[a+1]*1 + nums[a+2]*2 + nums[a+3]*3 + nums[a+4]*4 ...``` 乘以y是因为无论subarray的右边界在哪个位置,nums[i]左边的这些元素都会以一样的频次被计入subarray,所以要重复y次。 + +接下来考虑如何计算S。不难构造以index为权重的前缀和```presum2[i] = sum{nums[k]*k} for k=0,1,2,..i```,那么就有```presum2[i-1]-presum2[a] = nums[a+1]*(a+1) + nums[a+2]*(a+2) + nums[a+3]*(a+3) + nums[a+4]*(a+4) ...```。显然只要将其再减去常规的区间```sum[a+1:i]*a```,就是S了。综上即有```S = presum2[i-1]-presum2[a] - (presum[i-1] - presum[a]) * a```. + +类似地,我们希望处理右边的情况,我们同样标记i右边的是三个元素被计入的次数: +``` +a X X X X i X X X b + 3 2 1 +``` +同理,这里我们希望计算``` S = nums[i+1]*3 + nums[i+2]*2 + nums[i+3]*...```。我们同样可以利用presum和presum2,具体的是```S = (presum[b-1]-presum[i])*b - (presum2[b-1]-presum2[i])```. 于是nums[i]右边的三个元素对于"累加所有这些subarray元素和"的贡献就是```S*x```. + +此外,别忘了nums[i]本身对于"累加所有这些subarray元素和"的贡献是```nums[i]*x*y```. + +所以以上三部分相加,再乘以nums[i]本身,就是以nums[i]为weakest的subarray的total strength. + +本题还有一个注意点,就是如果subarray里面如果有多个位置出现了最小值,那么哪个算weakest?为了避免重复,我们可以约定最左边出现的最小值算该subarray的weakest。所以本题中我们在预处理时,实际需要求的是prevSmallerOrEqual和nextSmaller。类似的题目见```2104.Sum-of-Subarray-Ranges```. 
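+
+Putting the three parts together (with a = prevSmallerOrEqual index, b = nextSmaller index, x = i-a, y = b-i), the per-index contribution used in the code above can be summarized as:
+```
+S_left  = (presum2[i-1] - presum2[a]) - (presum[i-1] - presum[a]) * a
+S_right = (presum[b-1]  - presum[i])  * b - (presum2[b-1] - presum2[i])
+ret    += (S_left * y + S_right * x + nums[i] * x * y) * nums[i]
+```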
diff --git a/Others/2302.Count-Subarrays-With-Score-Less-Than-K/2302.Count-Subarrays-With-Score-Less-Than-K.cpp b/Others/2302.Count-Subarrays-With-Score-Less-Than-K/2302.Count-Subarrays-With-Score-Less-Than-K.cpp new file mode 100644 index 000000000..6320f46ac --- /dev/null +++ b/Others/2302.Count-Subarrays-With-Score-Less-Than-K/2302.Count-Subarrays-With-Score-Less-Than-K.cpp @@ -0,0 +1,30 @@ +using LL = long long; +class Solution { +public: + long long countSubarrays(vector& nums, long long k) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + vectorpresum(n+1); + presum[0] = nums[0]; + for (int i=1; i<=n; i++) + presum[i] = presum[i-1]+nums[i]; + + LL ret = 0; + for (int i=1; i<=n; i++) + { + if (nums[i] >= k) continue; + LL left = 1, right = i; + while (left < right) + { + int mid = right-(right-left)/2; + if ((presum[i]-presum[i-mid])*(mid) < k) + left = mid; + else + right = mid-1; + } + ret += left; + } + return ret; + } +}; diff --git a/Others/2302.Count-Subarrays-With-Score-Less-Than-K/Readme.md b/Others/2302.Count-Subarrays-With-Score-Less-Than-K/Readme.md new file mode 100644 index 000000000..b4fe19759 --- /dev/null +++ b/Others/2302.Count-Subarrays-With-Score-Less-Than-K/Readme.md @@ -0,0 +1,5 @@ +### 2302.Count-Subarrays-With-Score-Less-Than-K + +根据```Count Subarrays by Element```的套路,我们不会用o(N^2)遍历数组。我们会尝试用o(N)遍历每个元素,考察它unique地对应了哪些数组。 + +因为这道题里的subarray并没有任何代表其特征的最大值、最小值之类的,所以我们可以考虑将每种subarray的最后一个元素作为代表。具体的说,如果nums[i]是符合条件的subarray的最后一个元素,那么这个subarray的起点可以在哪里?显然,长度越长,起点越靠前,权重和就越大,直至可能超过k。利用单调性,我们就能用二分搜索来确定该subarray的最大长度,即对应了有多少个符合条件的subarray。 diff --git a/Others/2327.Number-of-People-Aware-of-a-Secret/2327.Number-of-People-Aware-of-a-Secret_v1.cpp b/Others/2327.Number-of-People-Aware-of-a-Secret/2327.Number-of-People-Aware-of-a-Secret_v1.cpp new file mode 100644 index 000000000..53457cb0b --- /dev/null +++ b/Others/2327.Number-of-People-Aware-of-a-Secret/2327.Number-of-People-Aware-of-a-Secret_v1.cpp @@ -0,0 +1,27 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int peopleAwareOfSecret(int n, int delay, int forget) + { + vectordp(n+1); // dp[i]: # of new persons who know the news in the i-th day + + dp[1] = 1; + + LL ret = 0; + for (int i=1; i<=n; i++) + { + for (int j=i+delay; jn) break; + dp[j] += dp[i]; + dp[j] %= M; + } + } + + for (int i=1; i<=n; i++) + if (i+forget > n) + ret = (ret + dp[i]) % M; + return ret; + } +}; diff --git a/Others/2327.Number-of-People-Aware-of-a-Secret/2327.Number-of-People-Aware-of-a-Secret_v2.cpp b/Others/2327.Number-of-People-Aware-of-a-Secret/2327.Number-of-People-Aware-of-a-Secret_v2.cpp new file mode 100644 index 000000000..1af588b92 --- /dev/null +++ b/Others/2327.Number-of-People-Aware-of-a-Secret/2327.Number-of-People-Aware-of-a-Secret_v2.cpp @@ -0,0 +1,30 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int peopleAwareOfSecret(int n, int delay, int forget) + { + vectordp(n+1); // dp[i]: # of new persons who know the news in the i-th day + vectordiff(n+1); + + dp[1] = 1; + diff[1] += 1; + diff[2] += -1; + + LL ret = 0; + for (int i=1; i<=n; i++) + { + dp[i] = (dp[i-1] + diff[i] + M) % M; + + if (i+delay <= n) + diff[i+delay] += dp[i]; + if (i+forget <= n) + diff[i+forget] -= dp[i]; + } + + for (int i=1; i<=n; i++) + if (i+forget > n) + ret = (ret + dp[i]) % M; + return ret; + } +}; diff --git a/Others/2327.Number-of-People-Aware-of-a-Secret/Readme.md b/Others/2327.Number-of-People-Aware-of-a-Secret/Readme.md new file mode 100644 index 000000000..a6900b45b --- /dev/null +++ 
b/Others/2327.Number-of-People-Aware-of-a-Secret/Readme.md @@ -0,0 +1,21 @@ +### 2327.Number-of-People-Aware-of-a-Secret + +#### 解法1:N^2 DP +本题显然是一个DP,那么如何设计状态变量呢?如果dp[i]表示第i天的时候有多少人被感染,那么情况就比较复杂,因为有些人是刚被感染的(是被第i-delay天刚被感染的人传染的),有些人是已经感染了一段时间的,这两部分如何区分?此外,还要考虑到第i天可能有一部分人刚好恢复健康,那部分人对应的是第```i-forget```天刚被感染的人。 + +所以由上面的分析,在每一天中,“刚被感染”的人数是一个非常有用的信息量。所以我们应该将其单独定义。所以不妨令dp[i]表示第i天刚被感染的人。那么根据题意,在第i+delay直至i+forget-1天,每天新感染的人数都会固定增加dp[i],即 +```cpp +for (int j=i+delay; jn,那么dp[i]就可以贡献给ret。 + +#### 解法2: 差分数组 +从上面的dp转移方程可以看出,在一个区间内赋值同一个值,用for循环显然是低效率的,我们必然会引入差分数组。定义diff[i]表示dp[i]相比于dp[i-1]之间的增量,即```dp[i]=dp[i-1]+diff[i]```。于是我们可以用两处增量的定义,来替换整个for循环 +``` +diff[i+delay] += dp[i]; +diff[i+forget] -= dp[i]; +``` +这里需要特别注意diff的初始条件。将解法1里```dp[1]=1```翻译过来,所对应的diff的初始条件是```diff[1]+=1, diff[2]+=-1```. diff --git a/Others/2337.Move-Pieces-to-Obtain-a-String/2337.Move-Pieces-to-Obtain-a-String.cpp b/Others/2337.Move-Pieces-to-Obtain-a-String/2337.Move-Pieces-to-Obtain-a-String.cpp new file mode 100644 index 000000000..c2ab3af99 --- /dev/null +++ b/Others/2337.Move-Pieces-to-Obtain-a-String/2337.Move-Pieces-to-Obtain-a-String.cpp @@ -0,0 +1,30 @@ +class Solution { +public: + bool canChange(string start, string target) + { + int j = 0; + for (int i=0; ij) + return false; + + j++; + } + + for (int k=j; kgetDist(vector& edges, int node) + { + int n = edges.size(); + vectordist(n, -1); + int i = node; + dist[i] = 0; + while (edges[i]!=-1 && dist[edges[i]]==-1) + { + int j = edges[i]; + dist[j] = dist[i] + 1; + i = j; + } + return dist; + } + + int closestMeetingNode(vector& edges, int node1, int node2) + { + int n = edges.size(); + + vectordist1 = getDist(edges, node1); + vectordist2 = getDist(edges, node2); + + + int ret = INT_MAX; + int ans = -1; + for (int i=0; ivisited(10); + dfs(s, 0, visited); + + return ret; + } + + void dfs(string&s, int i, vector&visited) + { + int n = s.size(); + if (i>=n) + { + ret++; + return; + } + + for (int d=0; d<=9; d++) + { + if (d==0 && i==0) continue; + if (visited[d] == 1) continue; + if (d < s[i]-'0') + ret += A(10-i-1, n-1-i); + else if (d == s[i]-'0') + { + visited[d] = 1; + dfs(s, i+1, visited); + visited[d] = 0; + } + } + } + + int A(int m, int k) + { + if (k==0) return 1; + int ret = 1; + for (int i=0; i 0) + ret = max(ret+1, count); + } + return ret; + } +}; diff --git a/Others/2380.Time-Needed-to-Rearrange-a-Binary-String/Readme.md b/Others/2380.Time-Needed-to-Rearrange-a-Binary-String/Readme.md new file mode 100644 index 000000000..f78aca7dc --- /dev/null +++ b/Others/2380.Time-Needed-to-Rearrange-a-Binary-String/Readme.md @@ -0,0 +1,9 @@ +### 2380.Time-Needed-to-Rearrange-a-Binary-String + +本题的o(N)解法有着非常高的思维难度。 + +对于任何一个1而言,它的任何一次移动意味着超越了它之前的一个0.因为最终这个1要超越所有它之前的0,假设这些0的数目是count,那么说明这个1最少要移动count次。 + +但是这个1极有可能会被前面的1所阻挡。一旦这个1的前进过程被阻挡到,那么意味着从此后,它的前进只能在前一个1移动一步之后再进行。也就是说,如果前一个1移动了x次到达期待位置,这一个1只能在第x+1步之后才能到达期待位置(也就是前一个1的后一个位置)。所以最终的答案是`max(x+1,count)` + +由此我们可以从前一个1的答案递归出下一个1的答案。最终答案就是最后一个1需要多少步移动到期待位置。 diff --git a/Others/2381.Shifting-Letters-II/2381.Shifting-Letters-II.cpp b/Others/2381.Shifting-Letters-II/2381.Shifting-Letters-II.cpp new file mode 100644 index 000000000..5343e69f3 --- /dev/null +++ b/Others/2381.Shifting-Letters-II/2381.Shifting-Letters-II.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + string shiftingLetters(string s, vector>& shifts) + { + int n = s.size(); + vectordiff(n+1); + for (auto& shift: shifts) + { + int start = shift[0], end = shift[1], dir = shift[2]; + int d = (dir==0)? 
-1:1; + diff[start]+=d; + diff[end+1]-=d; + } + + int cur = 0; + string ret; + for (int i=0; itarget) i--; } - - if (i>=0 && j<=N-1) - return true; - else - return false; + return false; } }; diff --git a/Others/2417.Closest-Fair-Integer/2417.Closest-Fair-Integer.cpp b/Others/2417.Closest-Fair-Integer/2417.Closest-Fair-Integer.cpp new file mode 100644 index 000000000..b5732b550 --- /dev/null +++ b/Others/2417.Closest-Fair-Integer/2417.Closest-Fair-Integer.cpp @@ -0,0 +1,48 @@ +class Solution { +public: + int closestFair(int n) + { + int count = 0; + int ret = helper(n, count); + if (ret!=-1) return ret; + + while (n>=0) + { + int ret = helper(n+1, count); + if (ret!=-1) return ret; + + ret = helper(n+2, count); + if (ret!=-1) return ret; + + n = n/10; + count++; + } + return -1; + } + + int helper(int n, int count) + { + int m = n; + int odd = 0, even = 0; + while (m>0) + { + if (m%2==0) + even++; + else + odd++; + m/=10; + } + + int sum = count, diff = odd-even; + if ((sum + diff) % 2 != 0) return -1; + int a = (sum + diff) / 2; // # of 0 + int b = (sum - diff) / 2; // # of 1 + if (a<0 || b<0) return -1; + + for (int i=0; i productQueries(int n, vector>& queries) + { + vectorpowers; + for (int i=0; i<32; i++) + { + if (n%2!=0) + powers.push_back(i); + n/=2; + if (n==0) break; + } + + vectorpresum(powers.size()); + for (int i=0; itwos(32*32,1); + long M = 1e9+7; + for (int i=1; i<32*32; i++) + twos[i] = twos[i-1] * 2 % M; + + vectorrets; + for (auto& query : queries) + { + int l = query[0], r = query[1]; + int diff = presum[r] - (l==0?0:presum[l-1]); + rets.push_back(twos[diff]); + } + return rets; + } +}; diff --git a/Others/2438.Range-Product-Queries-of-Powers/Readme.md b/Others/2438.Range-Product-Queries-of-Powers/Readme.md new file mode 100644 index 000000000..a7f2f5502 --- /dev/null +++ b/Others/2438.Range-Product-Queries-of-Powers/Readme.md @@ -0,0 +1,5 @@ +### 2438.Range-Product-Queries-of-Powers + +对于区间乘积,虽然我们可以借鉴区间求和的思路,采用前缀积相除的方法。但是本题涉及到对大数取模,应该注意到`(a/b) mod M != (a mod M) / (b mod M)`,而引入逆元的话,又显得比较繁琐。所以这道题的切入点应该在别处。 + +因为所有相乘的元素都是2的幂,显然我们知道这个性质,`2^a * 2^b = 2^(a+b)`,所以可以将区间的乘积转化为“指数”区间的求和,再求一次幂即可。 diff --git a/Others/2444.Count-Subarrays-With-Fixed-Bounds/2444.Count-Subarrays-With-Fixed-Bounds.cpp b/Others/2444.Count-Subarrays-With-Fixed-Bounds/2444.Count-Subarrays-With-Fixed-Bounds.cpp new file mode 100644 index 000000000..d2e050f26 --- /dev/null +++ b/Others/2444.Count-Subarrays-With-Fixed-Bounds/2444.Count-Subarrays-With-Fixed-Bounds.cpp @@ -0,0 +1,25 @@ +class Solution { +public: + long long countSubarrays(vector& nums, int minK, int maxK) + { + long long ret = 0; + int prevMin = -1, prevMax = -1, boundary = -1; + for (int i=0; imaxK) + { + boundary = i; + continue; + } + + if (nums[i] == minK) + prevMin = i; + if (nums[i] == maxK) + prevMax = i; + + ret += max(0, min(prevMin, prevMax) - boundary); + } + + return ret; + } +}; diff --git a/Others/2444.Count-Subarrays-With-Fixed-Bounds/Readme.md b/Others/2444.Count-Subarrays-With-Fixed-Bounds/Readme.md new file mode 100644 index 000000000..d3012068e --- /dev/null +++ b/Others/2444.Count-Subarrays-With-Fixed-Bounds/Readme.md @@ -0,0 +1,5 @@ +### 2444.Count-Subarrays-With-Fixed-Bounds + +本题的关键是掌握好“数subarray”的诀窍。我们通常都是固定一个端点,查看另一个端点可以在哪些地方。 + +我们考虑如果这个subarray的右边是nums[i],那么这个subarray的左端点之后必须包含至少一个minK和maxK。所以我们只要知道i左边最近的minK和maxK,取两者的较小值j,那么[j:i]就是以i结尾的、最短的符合条件的subarray。左端点从j往左延伸的话,这个subarray依然有效。但是特别注意,左端点不能延伸到非法的区域,即小于minK或者大于maxK的地方,所以实际左端点移动的范围是j-boundary,其中boundary是i之前最近的非法位置。 diff --git 
a/Others/2453.Destroy-Sequential-Targets/2453.Destroy-Sequential-Targets.cpp b/Others/2453.Destroy-Sequential-Targets/2453.Destroy-Sequential-Targets.cpp new file mode 100644 index 000000000..37384bb2f --- /dev/null +++ b/Others/2453.Destroy-Sequential-Targets/2453.Destroy-Sequential-Targets.cpp @@ -0,0 +1,32 @@ +using LL = long long; +class Solution { +public: + int destroyTargets(vector& nums, int space) + { + int len = 0; + int ret = 0; + + sort(nums.rbegin(), nums.rend()); + + unordered_mapdp; + + for (int i=0; i len) + { + ret = nums[i]; + len = dp[r]; + } + else if (dp[r] == len) + { + ret = nums[i]; + } + + } + + return ret; + } +}; diff --git a/Others/2453.Destroy-Sequential-Targets/Readme.md b/Others/2453.Destroy-Sequential-Targets/Readme.md new file mode 100644 index 000000000..19c7839e8 --- /dev/null +++ b/Others/2453.Destroy-Sequential-Targets/Readme.md @@ -0,0 +1,5 @@ +### 2453.Destroy-Sequential-Targets + +很明显,能够构成序列的位置必然是间隔为space的等差数列。不同的等差数列之间仅仅区别于offset,这个offset就是关于space的余数。例如,space如果是3,那么就有三种等差数列{0,3,6,9...},{1,4,7,10...},{2,5,8,11...}。 + +我们将所有的位置逆序排列,对于任意nums[i],令`r = nums[i] % space`,那么说明此位置属于offset为r的序列上,就有`dp[r] += 1`. 最终我们返回最长的dp[r]所对应的最后一个元素。 diff --git a/Others/253.Meeting-Rooms-II/253.Meeting-Rooms-II_v2.cpp b/Others/253.Meeting-Rooms-II/253.Meeting-Rooms-II_v2.cpp index f1ea07acb..a4763c5ea 100644 --- a/Others/253.Meeting-Rooms-II/253.Meeting-Rooms-II_v2.cpp +++ b/Others/253.Meeting-Rooms-II/253.Meeting-Rooms-II_v2.cpp @@ -1,34 +1,20 @@ -/** - * Definition for an interval. - * struct Interval { - * int start; - * int end; - * Interval() : start(0), end(0) {} - * Interval(int s, int e) : start(s), end(e) {} - * }; - */ class Solution { - static bool cmp1(Interval a, Interval b) - { - return a.start& intervals) - { - sort(intervals.begin(),intervals.end(),cmp1); - multisetSet; - int count=0; - int i=0; - while (iintervals[i].start) - { - Set.insert(intervals[i].end); - count = max(count,int(Set.size())); - i++; - } - Set.erase(Set.begin()); + int minMeetingRooms(vector>& intervals) { + mapMap; + for (auto interval:intervals) + { + Map[interval[0]]+=1; + Map[interval[1]]-=1; + } + + int sum = 0; + int ret = 0; + for (auto& [t,diff]: Map) + { + sum += diff; + ret = max(sum, ret); } - return count; + return ret; } }; diff --git a/Others/253.Meeting-Rooms-II/Readme.md b/Others/253.Meeting-Rooms-II/Readme.md index a8a798fb1..78e72ed80 100644 --- a/Others/253.Meeting-Rooms-II/Readme.md +++ b/Others/253.Meeting-Rooms-II/Readme.md @@ -16,11 +16,11 @@ 对于pq的数据结构,我们在C++中还可以用multiset来实现,因为它也是自动有序的。 -#### 解法2: +#### 解法2: 扫描线 -将所有{startTime,1}和{endTime,-1}加入一个数组,然后将这个数组按照时间戳排序.注意,本题中所有的有效区间的长度必须大于0,所以,{time,-1}要比{time,1}排序更靠前. +本题和732一模一样。将所有{startTime,1}和{endTime,-1}加入一个数组,然后将这个数组按照时间戳排序.注意,本题中所有的有效区间的长度必须大于0,所以,{time,-1}要比{time,1}排序更靠前. 使用一个count依时间顺序将所有的+1/-1进行累加.当count>0的时候标志着一个会议的开始,重新归为0的时候标着一个会议的结束. 
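+
+For instance (illustrative input): intervals = [[0,30],[5,10],[15,20]] produce the events (0,+1), (5,+1), (10,-1), (15,+1), (20,-1), (30,-1); the running sum goes 1, 2, 1, 2, 1, 0, so the peak is 2 and two meeting rooms suffice.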
-[Leetcode Link](https://leetcode.com/problems/meeting-rooms-ii) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/meeting-rooms-ii) diff --git a/Others/2536.Increment-Submatrices-by-One/2536.Increment-Submatrices-by-One.cpp b/Others/2536.Increment-Submatrices-by-One/2536.Increment-Submatrices-by-One.cpp new file mode 100644 index 000000000..124e71d7a --- /dev/null +++ b/Others/2536.Increment-Submatrices-by-One/2536.Increment-Submatrices-by-One.cpp @@ -0,0 +1,54 @@ +class Diff2d { +public: + vector>f; + vector>diff; + int m,n; + Diff2d(int m, int n) + { + this->m = m; + this->n = n; + diff.resize(m+1); + f.resize(m+1); + for (int i=0; i> rangeAddQueries(int n, vector>& queries) { + Diff2d diff(n,n); + for (auto& query: queries) + { + diff.set(query[0], query[1], query[2], query[3], 1); + } + diff.compute(); + vector>rets(n, vector(n)); + for (int i=0; i& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + LL ret = 0; + + vector>pre(n+5, vector(n+5)); + vector>post(n+5, vector(n+5)); + + for (int i=1; i<=n; i++) + for (int v=1; v<=n; v++) + { + if (nums[i] < v) + pre[i][v] = pre[i-1][v]+1; + else + pre[i][v] = pre[i-1][v]; + } + + for (int i=n; i>=1; i--) + for (int v=1; v<=n; v++) + { + if (nums[i] > v) + post[i][v] = post[i+1][v]+1; + else + post[i][v] = post[i+1][v]; + } + + for (int j=1; j<=n; j++) + for (int k=j+1; k<=n; k++) + { + if (nums[j]>nums[k]) + ret += pre[j-1][nums[k]] * post[k+1][nums[j]]; + } + + return ret; + } +}; diff --git a/Others/2552.Count-Increasing-Quadruplets/Readme.md b/Others/2552.Count-Increasing-Quadruplets/Readme.md new file mode 100644 index 000000000..b1ea2c163 --- /dev/null +++ b/Others/2552.Count-Increasing-Quadruplets/Readme.md @@ -0,0 +1,21 @@ +### 2552.Count-Increasing-Quadruplets + +从数据规模来看,我们可以尝试n^2的时间复杂度。这意味着我们可以遍历两个变量,然后看其他两个变量能否快速得到。 + +通过尝试,我们可以试图遍历j和k的位置。一旦确定之后,就意味着我们需要在[1:j-1]里找有多少个小于nums[k]的元素,以及在[k+1:n]里找有多少个大于nums[j]的元素。将两者乘起来,就是类似(x,j,k,x)的组合的数目。 + +接下来考虑如何求[1:j-1]里找有多少个小于nums[k]的元素。这里我们利用到另一个条件,就是nums是一个permutation,即每个元素的大小不超过n。所以我们考虑将数值的大小作为一个维度。令pre[i][v]表示前i个元素里小于v的元素有多少个。我们就有递归的表达式: +```cpp +if (nums[i] < v) + pre[i][v] = pre[i-1][v] + 1; // 多了一个nums[i]满足小于v +else + pre[i][v] = pre[i-1][v]; // nums[i]>=v,对于小于v的计数没有影响。 +``` +同理,我们可以递归算出post[i][v]表示后i个元素里大于v的元素有多少个。 + +最后,我们遍历j和k,累加结果 +```cpp +for (int j=1; j<=n; j++) + for (int k=j+1; k<=n; k++) + ret += pre[j-1][nums[k]] * post[k+1][nums[j]]; +``` diff --git a/Others/2584.Split-the-Array-to-Make-Coprime-Products/2584.Split-the-Array-to-Make-Coprime-Products.cpp b/Others/2584.Split-the-Array-to-Make-Coprime-Products/2584.Split-the-Array-to-Make-Coprime-Products.cpp new file mode 100644 index 000000000..b26f692ff --- /dev/null +++ b/Others/2584.Split-the-Array-to-Make-Coprime-Products/2584.Split-the-Array-to-Make-Coprime-Products.cpp @@ -0,0 +1,78 @@ +vectorEratosthenes(int n) // NlogNlogN +{ + vectorq(n+1,0); + vectorprimes; + for (int i=2; i<=sqrt(n); i++) + { + if (q[i]==1) continue; + int j=i*2; + while (j<=n) + { + q[j]=1; + j+=i; + } + } + for (int i=2; i<=n; i++) + { + if (q[i]==0) + primes.push_back(i); + } + return primes; +} + +class Solution { +public: + int findValidSplit(vector& nums) + { + int K = *max_element(nums.begin(), nums.end()); + vectorprimes = Eratosthenes(K); + unordered_setSet(primes.begin(), primes.end()); + + unordered_map>Map; + + for (int i=0; i x) + { + if (Map.find(x)==Map.end()) + Map[x].first = i; + Map[x].second = i; + break; + } + + if (x%p==0) + { + if (Map.find(p)==Map.end()) + Map[p].first = i; + Map[p].second 
= i; + } + while (x%p==0) x/=p; + } + } + + int n = nums.size(); + vectordiff(n+1); + for (auto& [k, v]: Map) + { + // cout< children * 8) return children-1; + + int d = money - children; + int k = d / 7; + int r = d % 7; + + if (r==3 && (children-k)==1) + return k-1; + else + return k; + + } +}; diff --git a/Others/2591.Distribute-Money-to-Maximum-Children/Readme.md b/Others/2591.Distribute-Money-to-Maximum-Children/Readme.md new file mode 100644 index 000000000..3e0bb1fee --- /dev/null +++ b/Others/2591.Distribute-Money-to-Maximum-Children/Readme.md @@ -0,0 +1,7 @@ +### 2591.Distribute-Money-to-Maximum-Children + +首先考虑无解的情况。当money distance(vector& nums) + { + int n = nums.size(); + unordered_map>Map; + for (int i=0; irets(n); + for (auto& [_, arr]: Map) + { + int m = arr.size(); + LL sum = 0; + for (int x: arr) + sum += abs(x - arr[0]); + rets[arr[0]] = sum; + + for (int i=0; i+1; // {pos, val} +class Solution { +public: + int minimumVisitedCells(vector>& grid) + { + int m = grid.size(), n = grid[0].size(); + + vector>dp(m, vector(n, INT_MAX/2)); + vector, greater<>>> row_diff(m); + vector, greater<>>> col_diff(n); + vector>row_set(m); + vector>col_set(n); + + for (int i=0; i0) row_set[i].insert(x); + if (x<0) row_set[i].erase(row_set[i].find(-x)); + } + while (!col_diff[j].empty() && col_diff[j].top().first == i) + { + int x = col_diff[j].top().second; + col_diff[j].pop(); + if (x>0) col_set[j].insert(x); + if (x<0) col_set[j].erase(col_set[j].find(-x)); + } + + int min_val = INT_MAX/2; + if (!row_set[i].empty()) min_val = min(min_val, *row_set[i].begin()); + if (!col_set[j].empty()) min_val = min(min_val, *col_set[j].begin()); + dp[i][j] = min_val; + if (i==0 && j==0) dp[0][0] = 0; + + int step = grid[i][j]; + if (step == 0) continue; + row_diff[i].push({j+1, dp[i][j]+1}); + row_diff[i].push({j+1+step, -(dp[i][j]+1)}); + col_diff[j].push({i+1, dp[i][j]+1}); + col_diff[j].push({i+1+step, -(dp[i][j]+1)}); + } + + if (dp[m-1][n-1] == INT_MAX/2) + return -1; + return dp[m-1][n-1] + 1; + + } +}; diff --git a/Others/2617.Minimum-Number-of-Visited-Cells-in-a-Grid/Readme.md b/Others/2617.Minimum-Number-of-Visited-Cells-in-a-Grid/Readme.md new file mode 100644 index 000000000..11245bec5 --- /dev/null +++ b/Others/2617.Minimum-Number-of-Visited-Cells-in-a-Grid/Readme.md @@ -0,0 +1,15 @@ +### 2617.Minimum-Number-of-Visited-Cells-in-a-Grid + +按照正常的DP思路,我们令dp[i][j]表示到达(i,j)的最短时间。当更新dp[i][j]的时候,我们发现它的前驱状态会有很多,包括同行里左边的若干格子(不一定相连),同列上面的若干格子(不一定相连)。我们发现遍历这些前驱状态最多需要花费o(m)和o(n)的时间,再配上遍历全体的o(mn),时间复杂度是超的。 + +我们换个DP的角度,如果已知dp[i][j],那么我们可以更新未来的一些状态,包括同行右边的若干格子(一定相连),以及同列下边的若干格子(一定相连)。但是同理,这也是`o(m)*o(mn)`的时间复杂度。但是我们发现从这个角度考虑的话,你可以更新的格子是一个连续的subarray。举个例子,如果dp[i][j]=4,grid[i][j]=3,那么意味着(i,j+1)到(i,j+3)这三个格子的dp都可以更新到5,此外(i+1,j)到(i+3,j)这三个格子的dp也都可以更新到5. 我们立马就想到了差分数组的性质,可以避免将整个区间的元素逐个更新,只需要对这个连续区间的首尾进行标记即可。 + +具体的步骤是:我们首先给每一行和每一列配一个“差分点”的优先队列。按照上面的例子,假设我们得到`dp[i][j]=4`,且`grid[i][j]=step`, 那么意味着优先队列row_diff[i]里需要添加两个差分点,分别是`{j+1, 5}`和`{j+step+1, -5}`,表示第i行从第j列开始的格子,dp值可以是5,但是从第i行第j+step+1列开始的格子,dp值不能再有5. 同理,我们对于另一个优先队列col_diff[j]也添加类似的差分点,分别是`{i+1, 5}`和`{i+step+1, -5}`. 
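+
+为了直观起见,下面给出“已知dp[i][j]后往两个优先队列里加入差分点”这一步的示意片段(变量名与上方解法代码一致,假设step=grid[i][j]>0):
+```cpp
+int step = grid[i][j];
+// 行方向:列 j+1 ~ j+step 的格子从此可以取 dp[i][j]+1,从列 j+step+1 起该候选值失效
+row_diff[i].push({j + 1,        dp[i][j] + 1});
+row_diff[i].push({j + 1 + step, -(dp[i][j] + 1)});
+// 列方向同理:行 i+1 ~ i+step 的格子可以取 dp[i][j]+1
+col_diff[j].push({i + 1,        dp[i][j] + 1});
+col_diff[j].push({i + 1 + step, -(dp[i][j] + 1)});
+```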
+ +以上讲的是已知dp[i][j],如何更新row_diff[i]与col_diff[j]。那么我们如何计算dp[i][j]呢?我们同样需要给每一行和每一列配一个multiset,表示当前可以选取的dp值,但是显然我们只会挑最小的。举个例子,当我们遍历到(i,j)点时,有序集合row[i]会从row_diff[i]里看是否在(i,j)有差分点,有的话就从row[i]里加入或者减去相应的dp值。同理,另一个有序集合col[j]会从col_diff[j]里看是否在(i,j)有差分点,有的话就从col[j]里加入或者减去相应的dp值。最终dp[i][j]必然是在row[i]和col[j]里里面挑最小的元素(即在一堆可选的dp值里挑最小的)。 + +综上,我们对每个(i,j),先从从row_diff[i]和col_diff[j]读入差分点,更新row[i]和col[j],然后选最小值得到dp[i][j],然后往row_diff[i]和col_diff[j]再加入后续的差分点。 + +最终的答案就是dp[m-1][n-1]. + +类型的思路可以借鉴2158,2218,253。 diff --git a/Others/2647.Color-the-Triangle-Red/2647.Color-the-Triangle-Red.cpp b/Others/2647.Color-the-Triangle-Red/2647.Color-the-Triangle-Red.cpp new file mode 100644 index 000000000..f057641ed --- /dev/null +++ b/Others/2647.Color-the-Triangle-Red/2647.Color-the-Triangle-Red.cpp @@ -0,0 +1,57 @@ +class Solution { +public: + vector> colorRed(int n) + { + vector>rets; + vector>val(n+1, vector(2*n+2)); + + for (int j=1; j<=2*n-1; j+=2) + { + val[n][j] = 1; + rets.push_back({n,j}); + } + + bool forward = 1; + for (int i=n-1; i>=2; i--) + { + int j, end, delta; + if (forward) { + j = 1; end = 2*i; delta = 1; + } else { + j = 2*i-1; end = 0; delta = -1; + } + + while (j != end) + { + if (val[i][j]==0){ + if (j%2 == 1) { // a normal triangle cell. Its bottom neighbour must have been filled. + if (val[i][j-delta]==0) { + // Noramlly, the previous row neighbour must have been filled. The exception is the case when (i,j) is already the edge. + val[i][j+delta] = 1; + rets.push_back({i, j+delta}); + } + } else { // a up-side-down triangle cell. Its up neighbour must have not been filled. + if (val[i][j+delta]==0) { + // Noramlly, the next row neighbour must have not been filled. + // The exception is the case when the next row neighbour is filled by the previous row. + // Favor upper cell, as its next neighbour must be filled in the next round. + val[i-1][j-1] = 1; + rets.push_back({i-1, j-1}); + } + } + val[i][j] = 1; + } + + j+= delta; + } + + forward = !forward; + } + + if (rets.back()[0]!=1 && rets.back()[1]!=1) { + rets.push_back({1,1}); + } + + return rets; + } +}; diff --git a/Others/2647.Color-the-Triangle-Red/Readme.md b/Others/2647.Color-the-Triangle-Red/Readme.md new file mode 100644 index 000000000..b9275d5b3 --- /dev/null +++ b/Others/2647.Color-the-Triangle-Red/Readme.md @@ -0,0 +1,11 @@ +### 2647.Color-the-Triangle-Red + +纯粹的贪心找规律。 + +初始,先将最后一行从左边开始,每隔一个cell进行染色。 + +然后,从最后一行开始,逐行扫描,按照顺序和逆序交替进行检查每个cell。如果已经被染色,则跳过。下面分情况讨论: +1. 如果该cell的列编号是奇数,说明是个正三角,它的下邻居必然已经染色(我们是逐行处理)。此时如果它之前的行邻居被染色了(通常情况下必然是的),那它自身必然会”被动“染色,故只标记,不加入答案。相反,如果它的左右行邻居都还没有被染色(意味着它本身是该行的第一个),则它无法被动染色。为了最大化效率,我们不直接染色它本身,而是染色它的下一个行邻居,这样它自身也可以”被动“染色。 +2. 
如果该cell的列编号是偶数,说明是个倒三角,它的上邻居必然还没有被染色。此时如果它的左右行邻居都已经被染色了(通常情况下前一个行邻居必然已经被染色,而下一个行邻居也有可能被跨行染色过),那它自身必然会”被动“染色,故只标记,不加入答案。相反,如果它的左右行邻居有任何一个没有被染色,意味着它无法被动染色。为了最大化效率,我们不直接染色它本身,而是染色它的上邻居,这样它自身也可以”被动“染色*(因为已经有一个行邻居)。 + +这种算法可能会收录{1,2},我们要将其去掉,换成{1,1}。 diff --git a/Others/2681.Power-of-Heroes/2681.Power-of-Heroes.cpp b/Others/2681.Power-of-Heroes/2681.Power-of-Heroes.cpp new file mode 100644 index 000000000..edf467eee --- /dev/null +++ b/Others/2681.Power-of-Heroes/2681.Power-of-Heroes.cpp @@ -0,0 +1,25 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int sumOfPower(vector& nums) + { + sort(nums.begin(), nums.end()); + + LL sum = 0; + LL ret = 0; + + for (int i=0; i=1) + sum = sum * 2 % M + (LL)nums[i-1]; + + ret += mx * sum % M + mx * nums[i] % M; + ret %= M; + } + + return ret; + } +}; diff --git a/Others/2681.Power-of-Heroes/Readme.md b/Others/2681.Power-of-Heroes/Readme.md new file mode 100644 index 000000000..2a92eba56 --- /dev/null +++ b/Others/2681.Power-of-Heroes/Readme.md @@ -0,0 +1,15 @@ +### 2681.Power-of-Heroes + +我们将所有元素排序之后,假设nums[i]是所选子集的最大值,那么意味着子集的其他元素必然是在[0:i-1]里面选择。我们依次枚举最小值的话,那么所有子集的最小值的和 +``` +sum = nums[0]* 2^(i-2) + nums[1] * 2^(i-1) + ... + nums[i-1]* 2^0; +``` +别忘了nums[i]本身也可以是最小值(子集只有一个元素)。所以答案就是`sum * nums[i]^2 + nums[i] * nums[i]^2`。 + +当我们右移i,考虑新的nums[i]是所选自己的最大值时,sum依然是 +``` +sum = nums[0]* 2^(i-2) + nums[1] * 2^(i-1) + ... + nums[i-1]* 2^0; +``` +和之前的sum相比,变动就是`sum = (sum + nums[i-1]) * 2`,o(1)时间就可以更新sum。 + +所以本题的算法就是:排序后,假设nums[i]是所选子集的最大值,更新sum,然后最终答案加上`sum * nums[i]^2 + nums[i] * nums[i]^2` diff --git a/Others/2718.Sum-of-Matrix-After-Queries/2718.Sum-of-Matrix-After-Queries.cpp b/Others/2718.Sum-of-Matrix-After-Queries/2718.Sum-of-Matrix-After-Queries.cpp new file mode 100644 index 000000000..4babc2cfa --- /dev/null +++ b/Others/2718.Sum-of-Matrix-After-Queries/2718.Sum-of-Matrix-After-Queries.cpp @@ -0,0 +1,32 @@ +using LL = long long; +class Solution { +public: + long long matrixSumQueries(int n, vector>& queries) + { + vectorrow(n, -1); + vectorcol(n, -1); + LL rowLeft = n; + LL colLeft = n; + LL ret = 0; + reverse(queries.begin(), queries.end()); + for (auto & q: queries) + { + int type = q[0], idx = q[1], val = q[2]; + if (type==0) + { + if (row[idx]!=-1) continue; + row[idx] = val; + ret += rowLeft * val; + colLeft--; + } + else + { + if (col[idx]!=-1) continue; + col[idx] = val; + ret += colLeft * val; + rowLeft--; + } + } + return ret; + } +}; diff --git a/Others/2718.Sum-of-Matrix-After-Queries/Readme.md b/Others/2718.Sum-of-Matrix-After-Queries/Readme.md new file mode 100644 index 000000000..23cdc4007 --- /dev/null +++ b/Others/2718.Sum-of-Matrix-After-Queries/Readme.md @@ -0,0 +1,5 @@ +### 2718.Sum-of-Matrix-After-Queries + +很明显,后面的操作会覆盖前者,我们必然会从后往前复盘,这样已经被填充的格子就不会再更改,更方便分析。 + +假设我们第一步是将某一行填充数字a,那么我们发现,以后的任何一次列操作都只会影响到n-1个格子。再假设第二步是将某一列填充数字b,然后我们发现,以后的任何一次列操作也都只会影响到n-1个格子。所以我们只需要维护两个量来记录当前任何一行还剩多少格子需要填充,以及任何一列还剩多少格子需要填充,这样当我们复盘操作的时候,就可以知道实际该行或该列只增加了多少sum。 diff --git a/Others/2731.Movement-of-Robots/2731.Movement-of-Robots.cpp b/Others/2731.Movement-of-Robots/2731.Movement-of-Robots.cpp new file mode 100644 index 000000000..c00939397 --- /dev/null +++ b/Others/2731.Movement-of-Robots/2731.Movement-of-Robots.cpp @@ -0,0 +1,29 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int sumDistance(vector& nums, string s, int d) + { + int n = nums.size(); + vectorpos; + for (int i=0; i& nums) + { + int n = nums.size(); + int ret = 0; + + for (int i=0; ivals(1005); + for (int j=i; j& nums) + { + int n = nums.size(); + + 
int ret = 0; + for (int i=0; i=0; j--) + { + if (nums[j]==nums[i]+1) + { + prevInvalid = j; + break; + } + if ((nums[j]>nums[i]+1) && prevLargerThanOne==-1) + prevLargerThanOne = j; + } + + int afterInvalid = n; + int afterLargerThanOne = n; + for (int j=i+1; jnums[i]+1) && afterLargerThanOne==n) + afterLargerThanOne = j; + } + + int a = i - prevInvalid; + int b = afterInvalid - i; + int c = i - max(prevInvalid, prevLargerThanOne); + int d = min(afterInvalid, afterLargerThanOne) - i; + + ret += max(0, a*b - c*d); + } + + return ret; + } +}; diff --git a/Others/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays_v3.cpp b/Others/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays_v3.cpp new file mode 100644 index 000000000..6f1bcce7b --- /dev/null +++ b/Others/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays_v3.cpp @@ -0,0 +1,56 @@ +class Solution { +public: + int sumImbalanceNumbers(vector& nums) + { + int n = nums.size(); + + vectorprevInvalid(n, -1); + vectorval2pos(1005,-1); + for (int i=0; iafterInvalid(n+1, n); + for (int i=0; i<1005; i++) val2pos[i] = n; + for (int i=n-1; i>=0; i--) + { + afterInvalid[i] = min(val2pos[nums[i]], val2pos[nums[i]+1]); + val2pos[nums[i]] = i; + } + + vectorprevLargerThanOne(n, -1); + stackst; + for (int i=0; iafterLargerThanOne(n, n); + while (!st.empty()) st.pop(); + for (int i=n-1; i>=0; i--) + { + while (!st.empty() && nums[st.top()] <= nums[i]+1) + st.pop(); + if (!st.empty()) afterLargerThanOne[i] = st.top(); + st.push(i); + } + + int ret = 0; + for (int i=0; inums[i]+1·)。同时从i往后推,找到第一个afterLargerThanOne的位置。这样就有`(i-prevLargerThanOne)*(afterLargerThanOne-i)`个必然包含nums[i]的subarray,使得nums[i]在排序后是最后一个(因为没有其他合法元素可以排在它后面)。我们记做`c*d`. + +所以`a*b-c*d`就是nums[i]可以贡献的subarray的个数,使得它在这些subarray里面贡献的是一个合法的index. 
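+
+固定i之后,这一步的累加可以写成如下示意片段(与上面解法代码中的写法一致,四个边界量的含义如前文所述):
+```cpp
+int a = i - prevInvalid;                           // 左端点的合法选择数
+int b = afterInvalid - i;                          // 右端点的合法选择数
+int c = i - max(prevInvalid, prevLargerThanOne);   // 其中 nums[i] 排序后仍是最后一个的左端选择数
+int d = min(afterInvalid, afterLargerThanOne) - i; // 同理的右端选择数(边界收紧的原因见下面的“特别注意”)
+ret += max(0, a * b - c * d);
+```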
+ +特别注意,之前计算的prevLargerThanOne不能往前超越prevInvalid;同理afterLargerThanOne不能往后超越afterInvalid,这是因为`c*d`的计数前提依然是valid(即不能有与nums[i]相同数值或者相同数值+1的元素存在)。 + +#### 解法3: +上述的解法2是`o(N^2)`的复杂度。运用预处理可以进一步优化到o(N)。 + +我们用Hash表(记录val->pos)从前往后扫一遍,就可以知道任何nums[i]的prevInvalid的位置。 + +我们再利用单调栈的计数从前往后扫一遍,就可以知道任何nums[i]的prevLargerThanOne的位置,具体做法和求prevGreaterElement几乎一样。 diff --git a/Others/2768.Number-of-Black-Blocks/2768.Number-of-Black-Blocks.cpp b/Others/2768.Number-of-Black-Blocks/2768.Number-of-Black-Blocks.cpp new file mode 100644 index 000000000..1e20e5394 --- /dev/null +++ b/Others/2768.Number-of-Black-Blocks/2768.Number-of-Black-Blocks.cpp @@ -0,0 +1,35 @@ +using LL = long long; +class Solution { + int n; +public: + LL encode(LL x, LL y) + { + return x*n + y; + } + + vector countBlackBlocks(int m, int n, vector>& coordinates) + { + unordered_mapMap; + this->n = n; + + int count = 0; + for (auto& c: coordinates) + { + int x = c[0], y = c[1]; + for (int i=x-1; i<=x; i++) + for (int j=y-1; j<=y; j++) + { + if (i>=0 && i=0 && jrets(5); + for (auto [k,v]: Map) + rets[v]+=1; + + rets[0] = LL(m-1)*LL(n-1) - rets[1] - rets[2] - rets[3] -rets[4]; + + return rets; + } +}; diff --git a/Others/2768.Number-of-Black-Blocks/Readme.md b/Others/2768.Number-of-Black-Blocks/Readme.md new file mode 100644 index 000000000..a10a8cbc6 --- /dev/null +++ b/Others/2768.Number-of-Black-Blocks/Readme.md @@ -0,0 +1,7 @@ +### 2768.Number-of-Black-Blocks + +为了不重不漏地数block,我们需要定义cell与block的关系。我们令每个block左上角的cell作为该block的“代表”,那么数block就转换成了数cell。 + +对于每个black cell,我们设想它可能属于block。显然,它最多属于四个不同的block,这些block对应的“代表”就是(x-1,y-1),(x,y-1),(x-1,y),(x,y).于是我们只需要给这四个block(的代表)各自加上一票即可。最终,每个block(的代表)所得的票数就意味着它所包含的black cell的个数。 + +注意,在右边界和下边界的cell是不能代表一个合法的block的。 diff --git a/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero.cpp b/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero.cpp new file mode 100644 index 000000000..8abebd858 --- /dev/null +++ b/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero.cpp @@ -0,0 +1,22 @@ +class Solution { +public: + bool checkArray(vector& nums, int k) + { + if (k==1) return true; + int n = nums.size(); + vectordiff(n+1, 0); + + int cur = 0; + for (int i=0; inums[i]) return false; + int delta = nums[i] - cur; + if (delta > 0 && i+k < n) + diff[i+k] -= delta; + cur += delta; + } + + return cur+diff[n-1] == nums[n-1]; + } +}; diff --git a/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero/Readme.md b/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero/Readme.md new file mode 100644 index 000000000..ff794a194 --- /dev/null +++ b/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero/Readme.md @@ -0,0 +1,13 @@ +### 2745.Construct-the-Longest-New-String + +我们将问题反过来看,就是问是否能将一个长度为n的全0数组,通过若干次的“k-size subarray +1”操作变成nums。 + +显然,对于第一个元素,想实现0->nums[0],相差`delta=nums[0]-0`,我们必须通过将[0:k-1]整体增加`delta`来实现。 + +此时观察第二个元素,已经是nums[0]了。如果这个数值大于nums[1],显然我们无法通过任何只增不减的操作实现变换,故返回false。否则意味着我们还差`delta=nums[1]-nums[0]`,必须需要将[1:k]整体提升`delta`。 + +从上面的过程我们已经发现规律。从前往后遍历时,每个位置i可能已经有了某个数值(受之前操作的影响)。为了实现与预定目标nums[i]的匹配(假设相差delta),那必须进行操作将[i:i+k-1]整体提升delta。而这些操作会影响到后续位置的数值。我们通过当前值与预期的nums[i]的大小关系,可以判定是否无解。 + +最终,如果最后一个元素的当前值与nums[n-1]完全一致时,说明整套操作能够实现目标。 + +很明显,对于区间整体的增减,我们需要差分数组来标记。比如,我们要将[i:i+k-1]整体提升d,那么只需要标记`diff[i]+=d, diff[i+k]-=d`即可. 
从零开始,一路前往后累积diff差分即可恢复每个位置上的数值。 diff --git a/Others/2808.Minimum-Seconds-to-Equalize-a-Circular-Array/2808.Minimum-Seconds-to-Equalize-a-Circular-Array.cpp b/Others/2808.Minimum-Seconds-to-Equalize-a-Circular-Array/2808.Minimum-Seconds-to-Equalize-a-Circular-Array.cpp new file mode 100644 index 000000000..1b50322d7 --- /dev/null +++ b/Others/2808.Minimum-Seconds-to-Equalize-a-Circular-Array/2808.Minimum-Seconds-to-Equalize-a-Circular-Array.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + int minimumSeconds(vector& nums) + { + unordered_map>Map; + for (int i=0; i& nums, int m) + { + if (nums.size()<=2) return true; + for (int i=1; i=m) return true; + return false; + } +}; diff --git a/Others/2811.Check-if-it-is-Possible-to-Split-Array/2811.Check-if-it-is-Possible-to-Split-Array_v2.cpp b/Others/2811.Check-if-it-is-Possible-to-Split-Array/2811.Check-if-it-is-Possible-to-Split-Array_v2.cpp new file mode 100644 index 000000000..a07c81f19 --- /dev/null +++ b/Others/2811.Check-if-it-is-Possible-to-Split-Array/2811.Check-if-it-is-Possible-to-Split-Array_v2.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + bool canSplitArray(vector& nums, int m) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + vectorpresum(n+1); + for (int i=1; i<=n; i++) + presum[i] = presum[i-1]+nums[i]; + + vector>dp(n+1, vector(n+1, 1)); + for (int len=3; len<=n; len++) + for (int i=1; i+len-1<=n; i++) + { + int j = i+len-1; + dp[i][j] = (dp[i][j-1]&&(presum[j-1]-presum[i-1]>=m)) || (dp[i+1][j]&&(presum[j]-presum[i]>=m)); + } + + return dp[1][n]; + } +}; diff --git a/Others/2811.Check-if-it-is-Possible-to-Split-Array/Readme.md b/Others/2811.Check-if-it-is-Possible-to-Split-Array/Readme.md new file mode 100644 index 000000000..592839086 --- /dev/null +++ b/Others/2811.Check-if-it-is-Possible-to-Split-Array/Readme.md @@ -0,0 +1,15 @@ +### 2811.Check-if-it-is-Possible-to-Split-Array + +#### 解法1 +本题的线性解法其实非常简单,只需要检查是否存在两个连续的元素之和大于等于m即可。 + +充分性:假设存在,那么我们在每一步切除的过程中保留上述两个元素,就能使操作不断进行下去。 + +必要性:假设不存在,那么无论用什么方法,当我们切到只剩三个元素的块时,根据题意一定无法继续切下去。 + +#### 解法2 +有动态规划的N^2解法。令dp[i][j]表示区间[i:j]是否可以根据规则切到最后。那么我们就有转移方程: +``` +dp[i][j] = (dp[i+1][j] && sum[i+1:j]>=m) || (dp[i][j-1] && sum[i:j-1]>=m) +``` +我们用二维循环,从小窗口的dp推导出大窗口的dp,最终返回dp[0][n-1]. diff --git a/Others/2818.Apply-Operations-to-Maximize-Score/2818.Apply-Operations-to-Maximize-Score.cpp b/Others/2818.Apply-Operations-to-Maximize-Score/2818.Apply-Operations-to-Maximize-Score.cpp new file mode 100644 index 000000000..9ce734fb6 --- /dev/null +++ b/Others/2818.Apply-Operations-to-Maximize-Score/2818.Apply-Operations-to-Maximize-Score.cpp @@ -0,0 +1,90 @@ +using LL = long long; +LL M = 1e9+7; +using PII=pair; +class Solution { +public: + LL quickMul(LL x, LL N) { + if (N == 0) { + return 1; + } + LL y = quickMul(x, N / 2) % M; + return N % 2 == 0 ? 
(y * y % M) : (y * y % M * x % M); + } + + vectorEratosthenes(int n) + { + vectorq(n+1,0); + for (int i=2; i<=n; i++) + { + if (q[i]>=1) continue; + q[i] = 1; + int j=i*2; + while (j<=n) + { + q[j]+=1; + j+=i; + } + } + return q; + } + + int maximumScore(vector& nums, int k) + { + LL n = nums.size(); + int MAX = *max_element(nums.begin(), nums.end()); + vectors = Eratosthenes(MAX); + + vectorscores(n); + for (int i=0; iprevLarger(n, -1); + stackStack; + for (int i=0; inextLarger(n, n); + while (!Stack.empty()) Stack.pop(); + for (int i=n-1; i>=0; i--) + { + while (!Stack.empty() && scores[Stack.top()] <= scores[i]) + Stack.pop(); + if (!Stack.empty()) + nextLarger[i] = Stack.top(); + Stack.push(i); + } + + vector temp(n); + for (int i=0; i= t) + { + ret = ret * quickMul(num, t) % M; + k -= t; + } + else + { + ret = ret * quickMul(num, k) % M; + k = 0; + } + if (k==0) break; + } + + return ret; + } +}; diff --git a/Others/2818.Apply-Operations-to-Maximize-Score/Readme.md b/Others/2818.Apply-Operations-to-Maximize-Score/Readme.md new file mode 100644 index 000000000..e7274f5dd --- /dev/null +++ b/Others/2818.Apply-Operations-to-Maximize-Score/Readme.md @@ -0,0 +1,17 @@ +### 2818.Apply-Operations-to-Maximize-Score + +这道题是很多套路和知识点的大杂烩。 + +首先,根据题意,我们要在n^2个区间里挑选k个区间。这n^2个区间里,有的x可以很大,有的x会很小。我们不会去遍历所有这n^2个区间、再根据他们的x排序。相比之下,x的取值范围只有n种(即nums里的n个元素),通过遍历x来枚举区间的效率更高。 + +显然,我们必然会贪心地使用“x最大”的那些区间,我们将nums数组里最大元素记做nums[i]。那么有多少区间的`highest prime score`是nums[i]呢?假设每个元素的`prime score`我们都已经提前计算好了,记做scores[i],那么我们寻找i左边第一个大于等于scores[i]的位置left,以及右边第一个大于scores[i]的位置right,那么符合条件的区间的左边界就可以在(left,i)之间任意选取,右边界就可以在(i,right)之间任意选取,任意配对之后总共的区间个数就是`(i-left)*(right-i)`. 也就是说,在最终选取的k个区间里,我们优先选取这`(i-left)*(right-i)`个区间,因为每次都可以让结果乘以nums[i](全局最大的x)。 + +以此类推,我们再贪心地使用“x第二大”的那些区间,记做nums[j]。同理计算出有多少个区间满足scores[j]是最大元素。当k还没选够时,我们就会优先使用这些区间。 + +再找nums第三大元素、第四大元素... 直至把k个区间都用完。 + +以上就是本题的大致思路。其中还有不少小问题。比如 + +1. 怎么预处理得到scores数组?可以用埃氏筛的思路,在根据某个质因数向上筛除合数时,可以顺便给该合数增1,就可以记录下每个数的distinct质因数的个数了。 +2. 如何求`previous larger or equal number`和`next larger number`,这是单调栈的经典应用了。 +3. 
假设某个数x对应的区间有P个,那么我们就要`ret *= x^P`,其中P可能很大,所以需要调用快速幂的libary。 diff --git a/Others/2857.Count-Pairs-of-Points-With-Distance-k/2857.Count-Pairs-of-Points-With-Distance-k.cpp b/Others/2857.Count-Pairs-of-Points-With-Distance-k/2857.Count-Pairs-of-Points-With-Distance-k.cpp new file mode 100644 index 000000000..79b1b0eb2 --- /dev/null +++ b/Others/2857.Count-Pairs-of-Points-With-Distance-k/2857.Count-Pairs-of-Points-With-Distance-k.cpp @@ -0,0 +1,30 @@ +class Solution { +public: + int countPairs(vector>& coordinates, int k) + { + int ret = 0; + unordered_mapMap; + for (int i=0; i& prices) + { + int n = prices.size(); + vectorarr(n); + for (int i=0; iMap; + for (int i=0; i& nums) + { + int n = nums.size(); + unordered_mapright; + for (int i=0; ileft; + for (int i=n-1; i>=0; i--) + left[nums[i]] = i; + + vectordiff(n); + for (auto [k,v]: left) + { + diff[left[k]]+=1; + diff[right[k]]-=1; + } + + int count = 0; + int sum = 0; + for (int i=0; iarr; + while (a>0) + { + arr.push_back(a%2); + a/=2; + } + if (arr[i]==1) + { + LL b = 0; + for (int j=arr.size()-1; j>i; j--) + b = b*2+arr[j]; + ret += b * pow(2, i); + b = 0; + for (int j=i-1; j>=0; j--) + b = b*2+arr[j]; + ret += b+1; + } + else + { + LL b = 0; + for (int j=arr.size()-1; j>i; j--) + b = b*2+arr[j]; + ret += b * pow(2, i); + } + + if (ret > k) return false; + } + + return true; + } +}; diff --git a/Others/3007.Maximum-Number-That-Sum-of-the-Prices-Is-Less-Than-or-Equal-to-K/Readme.md b/Others/3007.Maximum-Number-That-Sum-of-the-Prices-Is-Less-Than-or-Equal-to-K/Readme.md new file mode 100644 index 000000000..a77e2165e --- /dev/null +++ b/Others/3007.Maximum-Number-That-Sum-of-the-Prices-Is-Less-Than-or-Equal-to-K/Readme.md @@ -0,0 +1,13 @@ +### 3007.Maximum-Number-That-Sum-of-the-Prices-Is-Less-Than-or-Equal-to-K + +首先我们很容易看出此题的答案具有单调性。答案越大,就有越多的bit 1能够被计入;反之,被计入的bit 1会越少。所以整体的框架就是一个二分,核心就是制定一个上限A,想问从1到A的所有自然数的二进制表达里,总共有多少个bit 1出现在第x位、第2x位、... + +这个问题和`LC 233.Number-of-Digit-One`非常相似。我们不会固定一个数,再数里面有多少个bit 1;而是相反的策略,对于某位bit,我们计算有多少个数会在该bit的值是1. + +我们令上限A表达为`XXX i YYY`,考虑从1到A总共多少个自然数在第i位bit上的值是1呢? 
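+
+接下来两段的分类讨论可以浓缩成一个简短的辅助函数,示意如下(函数名为示意,i从0开始计):
+```cpp
+// 统计 [1, A] 中第 i 位(0-indexed)为 1 的数的个数
+long long countOnesAtBit(long long A, int i) {
+    long long high = A >> (i + 1);           // 高位部分 XXX
+    long long cnt  = high << i;              // 高位取 0 ~ XXX-1 时低位任意,共 XXX * 2^i 个
+    if ((A >> i) & 1)                        // A 的第 i 位为 1:高位恰为 XXX 时,低位可取 0 ~ YYY
+        cnt += (A & ((1LL << i) - 1)) + 1;   // 额外 YYY + 1 个
+    return cnt;
+}
+```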
+ +如果A[i]==0,那么高位部分可以是任意000 ~ (XXX-1),低位部分可以是任意 000 ~ 999。两处的任意组合,都可以保证整体的数值不超过上限A。这样的数有`XXX * 2^t`种,其中t表示`YYY`的位数。此外没有任何数可以满足要求。 + +如果A[i]==1,那么高位部分可以是任意000 ~ (XXX-1),低位部分可以是任意 000 ~ 999。两处的任意组合,都可以保证整体的数值不超过上限A。同样,这样的数有`XXX * 2^t`种,其中t表示`YYY`的位数。。此外,当高位恰好是`XXX`时,低位可以是从000~YYY,这样就额外有`YYY+1`种。 + +以上就统计了从1-A的所有自然数有多少个在第i位bit是1。我们再循环处理下一个的bit(隔x位)即可。 diff --git a/Others/3009.Maximum-Number-of-Intersections-on-the-Chart/3009.Maximum-Number-of-Intersections-on-the-Chart.cpp b/Others/3009.Maximum-Number-of-Intersections-on-the-Chart/3009.Maximum-Number-of-Intersections-on-the-Chart.cpp new file mode 100644 index 000000000..e9e27cee7 --- /dev/null +++ b/Others/3009.Maximum-Number-of-Intersections-on-the-Chart/3009.Maximum-Number-of-Intersections-on-the-Chart.cpp @@ -0,0 +1,34 @@ +class Solution { +public: + int maxIntersectionCount(vector& y) + { + int n = y.size(); + mapMap; + for (int i=1; i& nums, int k, vector>& edges) + { + vector>diff; + for (int x: nums) + { + diff.push_back({(x^k)-x, x}); + } + + sort(diff.rbegin(), diff.rend()); + + LL max_diff = 0; + LL total_diff = 0; + for (int i=0; i+1 max_diff) + { + max_diff = total_diff; + } + } + + LL total = 0; + for (int x: nums) total += x; + + return total + max_diff; + + } +}; diff --git a/Others/3068.Find-the-Maximum-Sum-of-Node-Values/Readme.md b/Others/3068.Find-the-Maximum-Sum-of-Node-Values/Readme.md new file mode 100644 index 000000000..824bc4941 --- /dev/null +++ b/Others/3068.Find-the-Maximum-Sum-of-Node-Values/Readme.md @@ -0,0 +1,5 @@ +### 3068.Find-the-Maximum-Sum-of-Node-Values + +此题本质和树的结构没有任何关系,假设a与b相连,b与c相连。那么我们对(a,b)和(b,c)同时操作的话,就相当于直接对(a,c),而b没有变化。故题意转换一下,就是在数组nums里,每次可以任意选取两个元素同时与k进行异或操作(不用考虑edge的关系)。问最终可以得到的最大的数组之和是多少。 + +考虑到任何一个元素,对k进行偶数次的异或操作,等同于没有进行操作。所以我们只会对每个元素进行最多一次操作。显然,我们会选择那些“增量”更大的元素进行操作。所以我们将每个元素的增量`(x^k)-x`按照从大到小的顺序排序,理论上将所有增量为正的元素进行操作,得到的和肯定最大。但是题意要求每次同时对两个元素进行操作,所以我们依次尝试前2个、前4个、前6个...所有取前偶数个元素的方案进行尝试。挑选其中“增量之和”最大的。最终的答案,就是sum(nums)加上最大的“增量之和”。 diff --git a/Others/3169.Count-Days-Without-Meetings/3169.Count-Days-Without-Meetings.cpp b/Others/3169.Count-Days-Without-Meetings/3169.Count-Days-Without-Meetings.cpp new file mode 100644 index 000000000..a208395fd --- /dev/null +++ b/Others/3169.Count-Days-Without-Meetings/3169.Count-Days-Without-Meetings.cpp @@ -0,0 +1,33 @@ +class Solution { +public: + int countDays(int days, vector>& meetings) + { + mapMap; + for (auto& meeting: meetings) + { + int a = meeting[0], b = meeting[1]; + Map[a]++; + Map[b+1]--; + } + Map[days+1]+=1; + + int sum = 0; + int cur = 1; + int total = 0; + for (auto [k,v]:Map) + { + if (sum==0 && sum+v>0) + { + total += k-cur; + } + else if (sum>0 && sum+v==0) + { + cur = k; + } + + sum += v; + } + + return total; + } +}; diff --git a/Others/3169.Count-Days-Without-Meetings/Readme.md b/Others/3169.Count-Days-Without-Meetings/Readme.md new file mode 100644 index 000000000..71ed74e6c --- /dev/null +++ b/Others/3169.Count-Days-Without-Meetings/Readme.md @@ -0,0 +1,7 @@ +### 3169.Count-Days-Without-Meetings + +很明显这是一道扫描线的题目。对于每个区间[s,e]的会议,我们记录`Map[s]++`和`Map[e+1]--`. 
最后我们将Map里的所有key按照时间顺序走一遍,累加差分值至count。 + +当count从正数降为零时,说明没有任何会议,此时记录当前日期cur。当count从零变成正数时,说明出现了会议,那么就将当前日期减去cur,即为最近一段没有会议的时长。 + +最终统计所有无会议的时长之和。 diff --git a/Others/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts.cpp b/Others/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts.cpp new file mode 100644 index 000000000..2fc898808 --- /dev/null +++ b/Others/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts.cpp @@ -0,0 +1,19 @@ +class Solution { +public: + int maximumMatchingIndices(vector& nums1, vector& nums2) + { + int ret = 0; + int n = nums1.size(); + unordered_map>Map; + for (int i=0; iscores(n); + for (int i=0; i& nums) + { + unordered_map>Map; + int n = nums.size(); + for (int i=0; i assignElements(vector& groups, vector& elements) + { + int n = *max_element(groups.begin(), groups.end()); + vectorarr(n+1, -1); + + for (int j=0; jn) continue; + + if (arr[x0]!=-1) continue; + + int x = x0; + while (x<=n) + { + if (arr[x]==-1) + arr[x] = j; + x+=x0; + } + } + + vectorrets; + for (int g: groups) + rets.push_back(arr[g]); + return rets; + } +}; diff --git a/Others/3447.Assign-Elements-to-Groups-with-Constraints/Readme.md b/Others/3447.Assign-Elements-to-Groups-with-Constraints/Readme.md new file mode 100644 index 000000000..3e6d9e611 --- /dev/null +++ b/Others/3447.Assign-Elements-to-Groups-with-Constraints/Readme.md @@ -0,0 +1,7 @@ +### 3447.Assign-Elements-to-Groups-with-Constraints + +突破点在于groups里的元素的数值不超过1e5.在这个范围是,如果枚举所有1的倍数,然后枚举所有2的倍数,然后枚举所有3的倍数,直至枚举n的倍数,那么总共的时间复杂度是`n+n/2+n/3+...n/n = n*(1+1/2+1/3+...1/n)`.这个级数虽然不收敛,但是它是趋近于nlog(n)的。所以本题可以用暴力枚举。 + +所以本题的算法很简单。我们开一个长度为1e5的数组assign,来记录每个自然数最早能被哪个element所assign。我们依次考察element里的每个元素,比如说elements[j]=x,然后枚举x的所有倍数(直至1e5),比如说kx,那样就有`assign[kx] = j`,当然根据题意,我们对于每个assign我们只更新一次。 + +最后根据groups的数值,从assgin里把答案拷贝过去即可。 diff --git a/Others/357.Count-Numbers-with-Unique-Digits/357.Count-Numbers-with-Unique-Digits.cpp b/Others/357.Count-Numbers-with-Unique-Digits/357.Count-Numbers-with-Unique-Digits.cpp new file mode 100644 index 000000000..063b3287e --- /dev/null +++ b/Others/357.Count-Numbers-with-Unique-Digits/357.Count-Numbers-with-Unique-Digits.cpp @@ -0,0 +1,19 @@ +class Solution { +public: + int countNumbersWithUniqueDigits(int n) + { + if (n==0) return 1; + int ret = 1; + for (int len = 1; len <= n; len++) + ret += A(10, len) - A(9,len-1); + return ret; + } + + int A(int m, int n) + { + int ret = 1; + for (int i=0; ileftL(n+2); + vectorrightT(n+2); + for (int i=1; i<=n; i++) + leftL[i] = leftL[i-1]+(s[i-1]=='L'); + for (int i=n; i>=1; i--) + rightT[i] = rightT[i+1]+(s[i+1]=='T'); + + long long count = 0; + for (int i=1; i<=n; i++) { + if (s[i]=='C') + count += leftL[i]*rightT[i]; + } + + long long ret = 0; + + long long CT = 0; + long long T = 0; + for (int i=n; i>=1; i--) { + T += s[i]=='T'; + if (s[i]=='C') CT += T; + ret = max(ret, count+CT); + } + + long long LC = 0; + long long L= 0; + for (int i=1; i<=n; i++) { + L += s[i]=='L'; + if (s[i]=='C') LC += L; + ret = max(ret, count+LC); + } + + for (int i=1; i<=n-1; i++) { + ret = max(ret, count+leftL[i+1]*rightT[i]); + } + + return ret; + } +}; diff --git a/Others/3628.Maximum-Number-of-Subsequences-After-One-Inserting/Readme.md b/Others/3628.Maximum-Number-of-Subsequences-After-One-Inserting/Readme.md new file mode 100644 index 000000000..ecdca9f4e --- /dev/null +++ 
b/Others/3628.Maximum-Number-of-Subsequences-After-One-Inserting/Readme.md @@ -0,0 +1,16 @@ +### 3628.Maximum-Number-of-Subsequences-After-One-Inserting + +这道题的迷惑性在于很容易误导用DP来解。我们容易设计成dp[i][j][k]表示前i个元素里、用了j次insertion(0,1),能出现多少个以字符k结尾(L,C,T)的合法subsequence。但是这个dp值实际上允许我们在任意位置插入,并且统计了所有的subsequence的总和。这与题意要求是不一样。题目里要求的是确定一个insertion位置的情况下,能出现多少个合法的subsequence。在所有插入的位置中,再选一个最大的结果。 + +本题的正解是利用subsequence的长度仅有3,以中心字符为C为入手点,考察左边L和右边T的分布与组合。具体分情况讨论如下: + +1. 不考虑任何插入操作。我们预处理得到leftL[i]表示i左边有多少个L,rightT[i]表示i右边有多少个T。于是对于每个字符如果s[i]='C',那么`leftL[i]*rightT[i]`就是以i为中心的subsequence的个数。然后全局累加,结果记为count。 + +2. 考虑插入L,我们需要计算以这个新插入的L开头的合法子序列的个数。假设我们将L插在i的左边,那么等价于计算i右边(包括i)存在多少个CT。我们可以从右往左遍历,对于每个C,统计它右边有多少个T,这样就可以加入CT的统计。 + +3. 同理考虑插入T,我们需要计算以这个新插入的T结尾的合法子序列的个数。假设我们将T插在i的右边,那么等价于计算i左边(包括i)存在多少个LC。我们可以从左往右遍历,对于每个C,统计它左边有多少个L,这样就可以加入LC的统计。 + +4. 最后考虑插入C。这个比较简单,假设我们将C插在i的左边,那么`leftL[i]*rightT[i-1]`就是以该C为中心的subsequence的个数。 + +注意,最终我们需要取2,3,4三种答案的最大值,与第一种情况的count相加。 + diff --git a/Others/3640.Trionic-Array-II/3640.Trionic-Array-II.cpp b/Others/3640.Trionic-Array-II/3640.Trionic-Array-II.cpp new file mode 100644 index 000000000..91a00bf10 --- /dev/null +++ b/Others/3640.Trionic-Array-II/3640.Trionic-Array-II.cpp @@ -0,0 +1,56 @@ +using ll = long long; +class Solution { +public: + vector>split(vector&arr) { + int n = arr.size(); + vector> result; + int i = 0; + while (i < n) { + int j = i; + while (j + 1 < n && arr[j + 1] < arr[j]) { + j++; + } + if (j > i) { + result.push_back({i, j}); + } + i = j+1; + } + return result; + } + + long long maxSumTrionic(vector& nums) { + int n = nums.size(); + vector>arr = split(nums); + + ll ret = LLONG_MIN/2; + + for (int i=0; i=n) continue; + if(nums[x-1]>=nums[x]) continue; + if(nums[y+1]<=nums[y]) continue; + + ll sum = nums[x-1]; + ll maxSum1 = nums[x-1]; + for (int j=x-2; j>=0; j--) { + if (nums[j]>=nums[j+1]) break; + sum += nums[j]; + maxSum1 = max(maxSum1, sum); + } + + sum = nums[y+1]; + ll maxSum2 = nums[y+1]; + for (int j=y+2; j& nums, vector>& queries) { + int n = nums.size(); + vectorm(n, 1); + + int B = 320; + + vector>small_k_queries[B+1]; + + for (auto q: queries) { + int l = q[0], r = q[1], k = q[2], v = q[3]; + if (k>B) { + for (int i=l; i<=r; i+=k) { + m[i] = m[i] * v % M; + } + } else { + small_k_queries[k].push_back(q); + } + } + + for (int k=1; k<=B; k++) { + if (small_k_queries[k].empty()) continue; + vectordiff(n+1, 1); + for (auto&q: small_k_queries[k]) { + int l = q[0], r = q[1], k = q[2], v = q[3]; + r = (r-l)/k*k + l; + diff[l] = diff[l] * v % M; + if (r+k<=n) diff[r+k] = diff[r+k] * inv(v) % M; + } + + vectorthis_round_m(n+1, 1); + for (int i=0; i=k?this_round_m[i-k]:1) * diff[i] % M; + } + + for (int i=0; i n) break; + if (lower <= n && upper >= n) + { + count += (n - lower +1); + break; + } + else + { + count += exp; + } + + exp *= 10; + } + + return count; + } +}; + diff --git a/Others/440.K-th-Smallest-in-Lexicographical-Order/Readme.md b/Others/440.K-th-Smallest-in-Lexicographical-Order/Readme.md new file mode 100644 index 000000000..244cbdcfc --- /dev/null +++ b/Others/440.K-th-Smallest-in-Lexicographical-Order/Readme.md @@ -0,0 +1,31 @@ +### 440.K-th-Smallest-in-Lexicographical-Order + +本题初看和```386.Lexicographical-Numbers```非常相似,但解法大不相同.在386题中,因为需要将按照字典序从小到大所有的元素打印出来,所以可以用构造法把这些数都找出来.但本题中,如果K很大,要将从1到K个的字典序元素都生成是很费时的. + +此题可以用递归的思路来拆解每个digit,逐步将k化小。我们先考察所有以1开头的数字`1xx..xx`,它们必然在字典序里是最靠前的一拨。如果它们的个数count1小于k,那么就意味着答案的首数字必然不会是1,我们就可以`k-=count1`。我们再考察所有以2开头的数字`2xx..xx`,同理此时它们必然在字典序里是最靠前的一拨。如果它们的个数count1大于k,说明我们的答案的首数字必然就是2! 
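+
+其中“统计以某个前缀开头、且不超过n的数的个数”是反复用到的子步骤,示意写法如下(对应后文伪代码里的 TotalNumbersBeginWith,函数名为示意):
+```cpp
+// 统计 [1, n] 中以 prefix 开头的数的个数
+long long countWithPrefix(long long prefix, long long n) {
+    long long count = 0;
+    long long lower = prefix, upper = prefix;  // 固定位数下,以 prefix 开头的数构成区间 [lower, upper]
+    while (lower <= n) {
+        count += min(n, upper) - lower + 1;
+        lower *= 10;                           // 前缀后面再接一位数字
+        upper = upper * 10 + 9;
+    }
+    return count;
+}
+```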
+ +接下来我们同理,处理第二位数字。我们先考察所有以20开头的数字`20xx..xx`,如果它们的个数count20小于k,那么就意味着答案的首两位数字必然不会是20,我们就可以`k-=count20`。我们再考察所有以21开头的数字`21xx..xx`,同理此时它们必然在字典序里是最靠前的一拨。如果它们的个数count21小于k,说明答案的首两位数字必然不会是21,我们继续`k-=count21`。直至我们发现`22xx..xx`的个数count22大于k,说明最终答案的首二位数字就是22. + +所以我们可以重复调用主函数`FindKthNumberBeginWith(prefix, k)`,表示求以prefix为前缀的第k个字典序排列的元素。如果k为0,就输出prefix本身。 + +代码的流程大致如下: +```cpp +int FindKthNumberBeginWith(prefix,k) +{ + if (k==0) return prefix; + + for i=0 to 9 + { + count = TotalNumbersBeginWith(prefix+[i], n); + if (count>Set; + mapMap; MyCalendarThree() { @@ -9,23 +9,23 @@ class MyCalendarThree { int book(int start, int end) { - Set.insert({start,1}); - Set.insert({end,-1}); + Map[start]+=1; + Map[end]-=1; int count=0; - int result=0; - for (auto a: Set) + int ret=0; + for (auto& [t, diff]: Map) { - count+=a.second; - result = max(result,count); + count += diff; + ret = max(ret, count); } - return result; + return ret; } }; /** * Your MyCalendarThree object will be instantiated and called as such: - * MyCalendarThree obj = new MyCalendarThree(); - * int param_1 = obj.book(start,end); + * MyCalendarThree* obj = new MyCalendarThree(); + * int param_1 = obj->book(start,end); */ diff --git a/Others/732.My-Calendar-III/Readme.md b/Others/732.My-Calendar-III/Readme.md index 3e0066e7d..e91eecae4 100644 --- a/Others/732.My-Calendar-III/Readme.md +++ b/Others/732.My-Calendar-III/Readme.md @@ -1,10 +1,7 @@ ### 732.My-Calendar-III -此题有奇思妙解. +本题和253一模一样。典型的扫描线算法。 -我们设计一个顺序的multiset>Set,每次调用我们就往里面放置{start,1}和{end,-1}.然后遍历这个集合,按照从小到大的顺序更新一个计数器,遇到1就加一,遇到-1就减一. +我们用一个按key有序的Map。每次调用book函数时,我们就操作```Map[start]+=1```和```Map[end]-=1```。然后遍历这个Map的key,按照时间轴的顺序累加差分值,即遇到1就加一,遇到-1就减一。这样就得到每个时刻有多少并行的会议。最终输出其中的最大值。 -奇妙的就是,你这样可以实时得到的,就是当前k booking的状态.遍历完之后这个计数器的历史最大值就是答案. - - -[Leetcode Link](https://leetcode.com/problems/my-calendar-iii) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/my-calendar-iii) diff --git a/Others/798.Smallest-Rotation-with-Highest-Score/798.Smallest-Rotation-with-Highest-Score_v1.cpp b/Others/798.Smallest-Rotation-with-Highest-Score/798.Smallest-Rotation-with-Highest-Score_v1.cpp index c844d044c..e33cf2345 100644 --- a/Others/798.Smallest-Rotation-with-Highest-Score/798.Smallest-Rotation-with-Highest-Score_v1.cpp +++ b/Others/798.Smallest-Rotation-with-Highest-Score/798.Smallest-Rotation-with-Highest-Score_v1.cpp @@ -3,20 +3,20 @@ class Solution { int bestRotation(vector& A) { int N = A.size(); - vectordiff(N,0); + vectordiff(N+1,0); for (int i=0; i=N次会重复之前的结果,我们只需要开长度为N+1的diff数组即可(多留一个是为了在某些情况下设置“下降沿”的时候保证diff不越界,本身diff[N]的数值并不会用到)。至于为什么取模之后能AC,是因为题目问的是最大score所对应的rotation index k,随意乱写任何值的diff[0]都不会改变sum的变化趋势,也就不会影响对最优k的判定。 + [Leetcode Link](https://leetcode.com/problems/smallest-rotation-with-highest-score) diff --git a/Others/853.Car-Fleet/853.Car-Fleet.cpp b/Others/853.Car-Fleet/853.Car-Fleet.cpp new file mode 100644 index 000000000..766111ce3 --- /dev/null +++ b/Others/853.Car-Fleet/853.Car-Fleet.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + int carFleet(int target, vector& position, vector& speed) + { + int N= position.size(); + + vector>q; + for (int i=0; i=0; i--) + { + double T = (target-q[i].first)*1.0/q[i].second; + int j = i-1; + while (j>=0 && (target-q[j].first)*1.0/q[j].second <= T) + j--; + count++; + i = j+1; + } + return count; + } +}; diff --git a/Others/853.Car-Fleet/Readme.md b/Others/853.Car-Fleet/Readme.md new file mode 100644 index 000000000..67f7f3e3e --- /dev/null +++ b/Others/853.Car-Fleet/Readme.md @@ -0,0 +1,5 @@ +### 853.Car-Fleet + 
+我们判断一辆车是否会与前车相撞,只要考察该车到达终点的时间是否小于前车到达终点的时间。所以最终能组成一个fleet的车辆,必然是一段连续区间的车,且该区间里的所有车都会撞上此区间最右边的领头车。 + +所以本题的算法是,从右往左遍历每辆车A,考察它作为领头车的话,它后面会有连续几辆车能在到达终点前撞上它,这个区间就是一个fleet。如果发现后面的某辆车B不会撞上它,那么B就是另一个fleet的领头车了。 diff --git a/Others/855.Exam-Room/Readme.md b/Others/855.Exam-Room/Readme.md deleted file mode 100644 index 8a69b11cd..000000000 --- a/Others/855.Exam-Room/Readme.md +++ /dev/null @@ -1,10 +0,0 @@ -### 855.Exam-Room.cpp - -此题考虑什么样的数据结构最合适。我们需要存放什么呢?其实只要存放每个人的位置就可以了。人与人的间隔要不要单独存储呢?其实可以不必。线性的数组在这里就足够用了。虽然每次插入位置的搜索是线性的,内存的移动也会费时间,但似乎实际的效率还不错。 - -每次进来一个人,我们线性扫描现有人的位置,查找最大的间隔。另外,头和尾的间隔需要另行考虑。确定最大间隔,就能确定插入的位置,直接用数组的insert命令即可。 - -在人离开的时候,也是直接用lower_bound确定位置的迭代器,再删除迭代器即可。 - - -[Leetcode Link](https://leetcode.com/problems/exam-room) \ No newline at end of file diff --git a/Others/978.Longest-Turbulent-Subarray/978.Longest-Turbulent-Subarray.cpp b/Others/978.Longest-Turbulent-Subarray/978.Longest-Turbulent-Subarray.cpp index 6887c93bc..a2c3238b9 100644 --- a/Others/978.Longest-Turbulent-Subarray/978.Longest-Turbulent-Subarray.cpp +++ b/Others/978.Longest-Turbulent-Subarray/978.Longest-Turbulent-Subarray.cpp @@ -11,23 +11,23 @@ class Solution { else if (A[i]& amount) + { + priority_queuepq; + for (int x: amount) + { + if (x!=0) + pq.push(x); + } + int ret = 0; + while (pq.size()>=2) + { + int a = pq.top(); + pq.pop(); + int b = pq.top(); + pq.pop(); + ret++; + if (a-1>0) + pq.push(a-1); + if (b-1>0) + pq.push(b-1); + } + + if (pq.size()==1) + ret += pq.top(); + return ret; + + } +}; diff --git a/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups/2335.Minimum-Amount-of-Time-to-Fill-Cups_v2.cpp b/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups/2335.Minimum-Amount-of-Time-to-Fill-Cups_v2.cpp new file mode 100644 index 000000000..8af54e3e3 --- /dev/null +++ b/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups/2335.Minimum-Amount-of-Time-to-Fill-Cups_v2.cpp @@ -0,0 +1,13 @@ +class Solution { +public: + int fillCups(vector& amount) + { + sort(amount.rbegin(), amount.rend()); + int total = accumulate(amount.begin(), amount.end(), 0); + if (amount[0]>=total/2+1) + return amount[0]; + else + return (total+1)/2; + + } +}; diff --git a/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups/Readme.md b/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups/Readme.md new file mode 100644 index 000000000..0c6571fa7 --- /dev/null +++ b/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups/Readme.md @@ -0,0 +1,9 @@ +### 2335.Minimum-Amount-of-Time-to-Fill-Cups + +如果我们将三种元素组成一个序列,一次取俩,每次抓取尽量要是不同的元素。言下之意就是希望将不同元素尽量间隔分布。于是这道题的本质就是```767.Reorganize-String```。 + +#### 解法1:模拟 +基本思想是,每个回合尽量使用当前剩余频次最多的两种元素,这样就能尽量最大化剩下元素的种类数,方便尽可能持久地凑成pair。所以我们用一个大顶堆的pq,每次读取最大的两个元素,各自减一后再放回去,直至pq里面只剩下一种元素。 + +#### 解法2:抽屉原理 +假设元素的总个数是total,如果最多元素的种类占据了二分之一以上,即```amount[0]>=total/2+1```,那么我们就无法保证间隔排列,所以必须操作amount[0]次,每次要么搭配一个其他种类的元素,要么自己单独被取出。反之,如果```amount[0]; +class Solution { +public: + long long kSum(vector& nums, int k) + { + LL sum = 0; + for (int x: nums) + if (x>0) sum += x; + if (k==1) return sum; + + for (int& x: nums) + x = abs(x); + sort(nums.begin(), nums.end()); + + priority_queue, greater<>>pq; + pq.push({nums[0],0}); + + for (int t=0; t; +class Solution { +public: + int mostBooked(int n, vector>& meetings) + { + priority_queue, greater<>>busy; + priority_queue, greater<>>free; + for (int i=0; icount(n); + + for (int i=0; i>& intervals) + { + priority_queue, greater<>>pq; + + sort(intervals.begin(), intervals.end()); + + int ret = 0; + + for (int i=0; i= intervals[i][0]) + { + pq.push(intervals[i][1]); 
+ } + else + { + pq.pop(); + pq.push(intervals[i][1]); + } + + ret = max(ret, (int)pq.size()); + } + + return ret; + } +}; diff --git a/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups/2406.Divide-Intervals-Into-Minimum-Number-of-Groups_v2.cpp b/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups/2406.Divide-Intervals-Into-Minimum-Number-of-Groups_v2.cpp new file mode 100644 index 000000000..564b86952 --- /dev/null +++ b/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups/2406.Divide-Intervals-Into-Minimum-Number-of-Groups_v2.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int minMeetingRooms(vector>& intervals) + { + mapMap; // {time, diff} + for (auto& interval: intervals) + { + Map[interval[0]]+=1; + Map[interval[1]+1]-=1; + } + + int sum = 0; + int ret = 0; + for (auto& [time, diff]: Map) + { + sum += diff; + ret = max(ret, sum); + } + return ret; + } +}; diff --git a/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups/Readme.md b/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups/Readme.md new file mode 100644 index 000000000..f430d8846 --- /dev/null +++ b/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups/Readme.md @@ -0,0 +1,13 @@ +### 2406.Divide-Intervals-Into-Minimum-Number-of-Groups + +此题和`253.Meeting-Rooms-II`一模一样。 + +#### 解法1:PQ贪心 +我们将所有的会议按照开始时间排序。对于会议1而言,它占用了一个房间,那么该房间必然在某个t时刻(也就是该会议结束时间)之后才有空。然后我们查看下一个会议的开始时间,如果它在t之后,那么它就可以继续用那个房间;否则只好单开一个房间,这就意味着目前有两个房间正在被使用,我们需要了解的依然是它们各自available的时刻,那个先结束就可以更早地被重复利用。显然,我们就需要一个PQ来盛装所有正在被使用的房间,按照结束时间从早到晚排序。 + +所以基本的算法思想就是:对于下一个会议,查看PQ里是否有房间可以释放使用。是的话就PQ弹出该房间,并重置结束时刻再放入PQ;否则就往PQ里新增一个房间。整个过程中PQ.size()的最大值就是答案。 + +#### 解法2:扫描线 +利用扫描线算法可以轻松地得到最多有多少个区间同时存在。 + +这里需要注意的是此题允许的那个区间的左右端点是重合的。如果我们在某一个时刻累加所有的新增会议和结束会议,那么可能会得到互相抵消的结果。解决方案很巧妙,我们将所有的双闭区间处理为左闭右开的区间。对于[left,right],我们在`left`时刻加入会议,在`right+1`时刻退出会议即可。 diff --git a/Priority_Queue/2542.Maximum-Subsequence-Score/2542.Maximum-Subsequence-Score.cpp b/Priority_Queue/2542.Maximum-Subsequence-Score/2542.Maximum-Subsequence-Score.cpp new file mode 100644 index 000000000..25a21096c --- /dev/null +++ b/Priority_Queue/2542.Maximum-Subsequence-Score/2542.Maximum-Subsequence-Score.cpp @@ -0,0 +1,33 @@ +using LL = long long; +class Solution { +public: + long long maxScore(vector& nums1, vector& nums2, int k) + { + int n = nums1.size(); + vector>arr; + for (int i=0; i, greater<>>pq; + LL minVal = INT_MAX; + LL sum = 0; + LL ret = 0; + for (int i=0; ik) + { + sum -= pq.top(); + pq.pop(); + } + if (pq.size()==k) + ret = max(ret, sum * minVal); + } + return ret; + + } +}; diff --git a/Priority_Queue/2542.Maximum-Subsequence-Score/Readme.md b/Priority_Queue/2542.Maximum-Subsequence-Score/Readme.md new file mode 100644 index 000000000..e04ff8898 --- /dev/null +++ b/Priority_Queue/2542.Maximum-Subsequence-Score/Readme.md @@ -0,0 +1,7 @@ +### 2542.Maximum-Subsequence-Score + +此题和`1383.Maximum Performance of a Team`一模一样。 + +对于此类题目,我们无法同时遍历两个变量。必然是遍历一个变量,然后找另一个变量的最优值。 + +我们观察第二个因子`min(...)`有一个特性,就是随着数量的增多,其值是单调的递减。于是我们想到,将nums按照降序排列,依次考察它的前缀,就可以依次得到所有可能的min值。如果固定了一个团队中的最小值x,那么这个团队里的最大和是多少?显然是将所有大于等于x的成员里挑最大的k个相加即可,一个PQ即可实现。 diff --git a/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative/2599.Make-the-Prefix-Sum-Non-negative.cpp b/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative/2599.Make-the-Prefix-Sum-Non-negative.cpp new file mode 100644 index 000000000..ffeb7dd93 --- /dev/null +++ b/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative/2599.Make-the-Prefix-Sum-Non-negative.cpp @@ -0,0 +1,30 @@ +class Solution { 
+public: + int makePrefSumNonNegative(vector& nums) + { + priority_queuepq; + long long sum = 0; + int ret = 0; + + for (int x: nums) + { + if (x >= 0) + sum += x; + else if (sum + x >=0) + { + sum += x; + pq.push(abs(x)); + } + else + { + pq.push(abs(x)); + sum += x; + int y = pq.top(); + pq.pop(); + sum += y; + ret++; + } + } + return ret; + } +}; diff --git a/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative/Readme.md b/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative/Readme.md new file mode 100644 index 000000000..0299a2d5d --- /dev/null +++ b/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative/Readme.md @@ -0,0 +1,13 @@ +### 2599.Make-the-Prefix-Sum-Non-negative + +本题是典型的反悔贪心。 + +我们一路维护前缀和sum。假设遇到当前的元素x,那么分一下几种情况。 + +场景1,如果x是正数,那么无脑收录。 + +场景2,如果x是负数,并且sum+x>=0,那么我们也会贪心地将其收入前缀,从而减少一次扔元素的操作。 + +场景3,就是如果x是负数,且sum+x<0,那么我们别我他法,必须将x扔走。但是将x扔走的同时,能捞一些什么好处呢?假设之前有某个负数y没有被扔走而是收录进了前缀,并且绝对值的y大于x,那么显然将y扔走比将x扔走更合算,并且将y扔走可以保证可以将x顺利保留在前缀里。 + +所以我们需要将场景2里所有收录过的负数,按照绝对值大小放入一个PQ。当遇到场景3的时候,我们将PQ里的最大值代替x去扔掉(如果大于x的话),这样同样用一次操作,可以获取最大的收益(尽可能地提升前缀和)。 diff --git a/Priority_Queue/2931.Maximum-Spending-After-Buying-Items/2931.Maximum-Spending-After-Buying-Items.cpp b/Priority_Queue/2931.Maximum-Spending-After-Buying-Items/2931.Maximum-Spending-After-Buying-Items.cpp new file mode 100644 index 000000000..aef3ff4e0 --- /dev/null +++ b/Priority_Queue/2931.Maximum-Spending-After-Buying-Items/2931.Maximum-Spending-After-Buying-Items.cpp @@ -0,0 +1,31 @@ +using PII = pair; +class Solution { +public: + long long maxSpending(vector>& values) + { + int m = values.size(); + int n = values[0].size(); + + priority_queue, greater<>>pq; + vectorp(m, n-1); + for (int i=0; i0`. + +所以我们将所有shop里当前available的物品放入一个小顶堆,每次取最小值与当前的d相乘即可。取完一个最小值,就把它对应的shop的下一件available的物品放入PQ。直至取完所有m*n件物品。 diff --git a/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order/3645.Maximum-Total-from-Optimal-Activation-Order.cpp b/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order/3645.Maximum-Total-from-Optimal-Activation-Order.cpp new file mode 100644 index 000000000..69bbc9ba6 --- /dev/null +++ b/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order/3645.Maximum-Total-from-Optimal-Activation-Order.cpp @@ -0,0 +1,51 @@ +using PII = pair; + +class Solution { +public: + long long maxTotal(vector& value, vector& limit) { + int n = value.size(); + + unordered_setpurged; + unordered_mapMap; // value -> How many active elements whose limit is the key + + vector arr; + for (int i = 0; i < n; ++i) { + arr.push_back({value[i], limit[i]}); + } + sort(arr.begin(), arr.end(), [](PII&a, PII&b){ + if (a.second!=b.second) + return a.second < b.second; + else + return a.first > b.first; + }); + priority_queue, greater> pq; + + long long ret = 0; + int active = 0; + + for (auto& [V,L] : arr) { + if (purged.find(L)!=purged.end()) + continue; + + if (L > active) { + ret += V; + pq.push(V); + active += 1; + Map[L]+=1; + + purged.insert(active); + int temp = Map[active]; + Map[active] = 0; + active -= temp; + } else if (!pq.empty() && V > pq.top()) { + ret -= pq.top(); + pq.pop(); + ret += V; + pq.push(V); + Map[L]+=1; + } + } + + return ret; + } +}; diff --git a/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order/Readme.md b/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order/Readme.md new file mode 100644 index 000000000..7ab3a1723 --- /dev/null +++ b/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order/Readme.md @@ -0,0 +1,10 @@ +### 3649.Maximum-Total-from-Optimal-Activation-Order + +基本的思路是反悔贪心。 
+ +我们将所有元素按照limit从小到大排序(limit相同的情况下,value更大的优先),因为limit小的必须先挑,否则当active_number很大的时候就无法再取了。排序之后逐个考察每个元素: +1. 如果`L>active_number`,那么就可以无脑选取该元素的value。 +2. 反之,意味着我们无法再新加这个元素,但是我们依然有机会更新ret,那就是将已经选取的元素里踢掉最小的一个,替换成当前的这个元素(如果更优的话)。这样的操作是合法的,因为被替换的元素的L更小,在它被选中的那个回合我们“回溯地”为这个L更大的元素是符合规则的。 + +以上就是本题的基本贪心规则。除此之外,题目还有一个规则,就是active_number其实是可以减少的。所以我们需要用一个Hash表,记录每种limit的元素已经有多少被选中了(即active的状态)。当我们的active_number增长到某个数值A的时候,需要再减去Map[A],同时将Map[A]清零,并且以后再次遇到limit是A的元素都直接跳过。 + diff --git a/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/774.Minimize-Max-Distance-to-Gas-Station_pq.cpp b/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/774.Minimize-Max-Distance-to-Gas-Station_pq.cpp index 4b8a13df6..4bfa71ae2 100644 --- a/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/774.Minimize-Max-Distance-to-Gas-Station_pq.cpp +++ b/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/774.Minimize-Max-Distance-to-Gas-Station_pq.cpp @@ -2,17 +2,25 @@ class Solution { public: double minmaxGasDist(vector& stations, int K) { + double ub = double(stations.back()-stations[0]) / (K + 1); + priority_queue>pq; for (int i=1; i0) { double space = pq.top().first; - int insertNum = pq.top().second; + int parts = pq.top().second; pq.pop(); - pq.push({space*insertNum/(insertNum+1),insertNum+1}); + pq.push({space*parts/(parts+1), parts+1}); + K--; } return pq.top().first; diff --git a/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/Readme.md b/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/Readme.md index 282a76dd1..77d1db14a 100644 --- a/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/Readme.md +++ b/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station/Readme.md @@ -2,15 +2,13 @@ #### 解法1:贪心法 -贪心法有非常巧妙的思想,这里用到了pq. +贪心法有非常巧妙的思想,这里用到了大顶堆的优先对了. 队列里放置的是`{space, parts}`,表示有连续parts个长度为space的区间。注意,`space*parts`其实代表的一定是某两个加油站之间的距离。最开始时,我们放入所有的`{stations[i]-stations[i-1], 1}`。 -首先我们将所有老加油站之间的间隔距离放入pq,默认是大顶堆,这些老加油站的间隔都没有新加油站插入。那么,对于队首的这个间距最大,说明我们要对其下手,先尝试将这个间距除以2,这里除以2表明原本是没有新加油站的,现在加入一个。然后将这个新间隔放入队列。 +如果我们还有增加加油站的名额,那么必然是对PQ的队首元素`{space, parts}`操作,试图将当前全局最大的这个space降下来。我们只需要试图在`space*parts`这个区段里再增加一个。于是我们弹出队首元素之后,再往PQ里放入`{space*parts/(parts+1), parts+1}`即可。 -每次我们取队首元素,总是得到的是(当前最大的)某两个老加油站之间的新间隔,以及这两个老加油站之间插入的新加油站数量m。我们需要做的,是重新规划这两个老加油站之间的间隔,改成插入的新加油站数量为m+1. +重复上述过程,直至我们用完了所有新增加油站的配额。此时队首元素的space,就是当前最大的新加油站的间距。 -重复上述过程,直至加入新加油站的总是达到了K。此时队首的老加油站之间的新间距,就是整体最大的间距。 - -这个方法非常巧妙,只可惜仍然超时。 +这个方法非常巧妙,只可惜仍然超时。这里有一个改进的方法。我们可以知道,如果所有的旧加油站不存在的话,那么我们直接可以得到最优的最大间距,即平分整个数轴,得到`ub = (stations.back()-stations[0])/(K+1)`。现在由于这些旧加油站的存在,可以帮助我们将最大间距变得更小。所以对于任意两个旧加油站之间的dist,我们至少应该拆分为`dist/ub`个parts,使得细分的space先一步到位直接逼近ub,也就是提前预定了一些新加油站的数目。这样可以提高效率,避免对新加油站一个一个的配置。 #### 解法2:二分法 @@ -21,4 +19,4 @@ 最后知道二分的搜索精度小于1e-6. -[Leetcode Link](https://leetcode.com/problems/minimize-max-distance-to-gas-station) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/minimize-max-distance-to-gas-station) diff --git a/Priority_Queue/857.Minimum-Cost-to-Hire-K-Workers/Readme.md b/Priority_Queue/857.Minimum-Cost-to-Hire-K-Workers/Readme.md index 910ea9349..969c7251f 100644 --- a/Priority_Queue/857.Minimum-Cost-to-Hire-K-Workers/Readme.md +++ b/Priority_Queue/857.Minimum-Cost-to-Hire-K-Workers/Readme.md @@ -4,9 +4,9 @@ ```wage[i]/quality[i]```最高的那位,意味着最不实惠的工人,它拉高了unitWage,使得其他工人都必须按照这个unitWage乘以各自的quality拿工资.但转念一想,如果我们必须雇佣这个最不实惠的工人的话,那么剩下的工人该如何选择呢?显然我们只要选K-1个quality最低的工人,他们可以拉高那个"最不实惠工人"的quality比重,从而减少其他工人的quality比重,从而降低总工资. -我们再考虑,如果选择了```wage[i]/quality[i]```第二高的那位,那么我们就在接下来的N-2个人里面选择K-1个quality最底的工人即可. 
+我们再考虑,如果选择了```wage[i]/quality[i]```第二高的那位,那么我们就在接下来的N-2个人(本质是性价比相对于i更优的N-2个人)里面选择K-1个quality最底的工人即可. -由此贪心法的最优策略就出来了.实际操作中,我们根据```wage[i]/quality[i]```从低到高进行处理. +由此贪心法的最优策略就出来了.实际操作中,我们根据```wage[i]/quality[i]```从低到高进行处理.如果选i,那么我们就在i之前的员工里挑K-1个quality最小的人。 [Leetcode Link](https://leetcode.com/problems/minimum-cost-to-hire-k-workers) diff --git a/Readme.md b/Readme.md index 4328e7c36..9ced2324e 100644 --- a/Readme.md +++ b/Readme.md @@ -29,13 +29,16 @@ [1580.Put-Boxes-Into-the-Warehouse-II](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1580.Put-Boxes-Into-the-Warehouse-II) (H-) [1687.Delivering-Boxes-from-Storage-to-Ports](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1687.Delivering-Boxes-from-Storage-to-Ports) (H) [1793.Maximum-Score-of-a-Good-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1793.Maximum-Score-of-a-Good-Subarray) (M+) -[1798.Maximum-Number-of-Consecutive-Values-You-Can-Make/Readme.md](https://github.com/wisdompeak/LeetCode/blob/master/Greedy/1798.Maximum-Number-of-Consecutive-Values-You-Can-Make) (H-) [1989.Maximum-Number-of-People-That-Can-Be-Caught-in-Tag](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1989.Maximum-Number-of-People-That-Can-Be-Caught-in-Tag) (M+) +[2354.Number-of-Excellent-Pairs](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2354.Number-of-Excellent-Pairs) (H-) +[2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome) (H-) * ``Sliding window`` [532.K-diff-Pairs-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/532.K-diff-Pairs-in-an-Array) (H-) [611.Valid-Triangle-Number](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/611.Valid-Triangle-Number) (M+) +[930.Binary-Subarrays-With-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Hash/930.Binary-Subarrays-With-Sum) (M+) [1004.Max-Consecutive-Ones-III](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1004.Max-Consecutive-Ones-III) (M) [1052.Grumpy-Bookstore-Owner](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1052.Grumpy-Bookstore-Owner) (M) +[1358.Number-of-Substrings-Containing-All-Three-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters) (M) [1838.Frequency-of-the-Most-Frequent-Element](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element) (H-) [395.Longest-Substring-with-At-Least-K-Repeating-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/395.Longest-Substring-with-At-Least-K-Repeating-Characters) (H) [1763.Longest-Nice-Substring](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1763.Longest-Nice-Substring) (H) @@ -43,13 +46,31 @@ [2024.Maximize-the-Confusion-of-an-Exam](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2024.Maximize-the-Confusion-of-an-Exam) (M) [424.Longest-Repeating-Character-Replacement](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/424.Longest-Repeating-Character-Replacement) (H-) [2106.Maximum-Fruits-Harvested-After-at-Most-K-Steps](https://github.com/wisdompeak/LeetCode/blob/master/Two_Pointers/2106.Maximum-Fruits-Harvested-After-at-Most-K-Steps) (H) +[2401.Longest-Nice-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2401.Longest-Nice-Subarray) (H-) 
+[2411.Smallest-Subarrays-With-Maximum-Bitwise-OR](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2411.Smallest-Subarrays-With-Maximum-Bitwise-OR) (H-) +[2516.Take-K-of-Each-Character-From-Left-and-Right](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right) (M+) +[2564.Substring-XOR-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2564.Substring-XOR-Queries) (H-) +[2730.Find-the-Longest-Semi-Repetitive-Substring](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2730.Find-the-Longest-Semi-Repetitive-Substring) (M+) +[2747.Count-Zero-Request-Servers](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2747.Count-Zero-Request-Servers) (H-) +[2831.Find-the-Longest-Equal-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2831.Find-the-Longest-Equal-Subarray) (M) +[2953.Count-Complete-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2953.Count-Complete-Substrings) (H) +[2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency](https://github.com/wisdompeak/LeetCode/blob/master/Two_Pointers/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency) (M) +[2968.Apply-Operations-to-Maximize-Frequency-Score](https://github.com/wisdompeak/LeetCode/tree/master/Math/2968.Apply-Operations-to-Maximize-Frequency-Score) (H-) +[3234.Count-the-Number-of-Substrings-With-Dominant-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones) (H-) +[3634.Minimum-Removals-to-Balance-Array](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3634.Minimum-Removals-to-Balance-Array) (M+) * ``Sliding window : Distinct Characters`` [076.Minimum-Window-Substring](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/076.Minimum-Window-Substring) (M+) [003.Longest-Substring-Without-Repeating-Character](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/003.Longest%20Substring%20Without%20Repeating%20Characters) (E+) [159.Longest-Substring-with-At-Most-Two-Distinct-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/159.Longest-Substring-with-At-Most-Two-Distinct-Characters)(H-) [340.Longest-Substring-with-At-Most-K-Distinct-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/340.Longest-Substring-with-At-Most-K-Distinct-Characters) (H) -[992.Subarrays-with-K-Different-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/992.Subarrays-with-K-Different-Integers) (H-) -* ``Two pointers for two seuqences`` +[992.Subarrays-with-K-Different-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/992.Subarrays-with-K-Different-Integers) (H-) +[3134.Find-the-Median-of-the-Uniqueness-Array](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3134.Find-the-Median-of-the-Uniqueness-Array) (H-) +[2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K) (M) +[2537.Count-the-Number-of-Good-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/2537.Count-the-Number-of-Good-Subarrays) (M+) +[3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II) (M+) 
+[3306.Count-of-Substrings-Containing-Every-Vowel-and-K-Consonants-II](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3306.Count-of-Substrings-Containing-Every-Vowel-and-K-Consonants-II) (H-) +[3641.Longest-Semi-Repeating-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3641.Longest-Semi-Repeating-Subarray) (H-) +* ``Two pointers for two sequences`` [986.Interval-List-Intersections](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/986.Interval-List-Intersections) (M) [1229.Meeting-Scheduler](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1229.Meeting-Scheduler) (M+) [1537.Get-the-Maximum-Score](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1537.Get-the-Maximum-Score) (H-) @@ -69,7 +90,6 @@ [222.Count-Complete-Tree-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Tree/222.Count-Complete-Tree-Nodes) (H-) [275.H-index II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/275.H-Index-II) (H) [302.Smallest-Rectangle-Enclosing-Black-Pixels](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/302.Smallest-Rectangle-Enclosing-Black-Pixels) (M+) -[410.Split-Array-Largest-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/410.Split-Array-Largest-Sum) (H) [475.Heaters](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/475.Heaters) (H-) [483.Smallest-Good-Base](https://github.com/wisdompeak/LeetCode/blob/master/Binary_Search/483.Smallest-Good-Base) (H) [029.Divide-Two-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/029.Divide-Two-Integers) (M+) @@ -77,34 +97,30 @@ [658.Find-K-Closest-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/658.Find-K-Closest-Elements) (H) 1095.Find-in-Mountain-Array (TBD) [1157.Online-Majority-Element-In-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1157.Online-Majority-Element-In-Subarray) (H-) -1201.Ugly-Number-III (TBD) [1533.Find-the-Index-of-the-Large-Integer](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1533.Find-the-Index-of-the-Large-Integer) (M) [1712.Ways-to-Split-Array-Into-Three-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1712.Ways-to-Split-Array-Into-Three-Subarrays) (H) [1889.Minimum-Space-Wasted-From-Packaging](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1889.Minimum-Space-Wasted-From-Packaging) (H-) [1901.Find-a-Peak-Element-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1901.Find-a-Peak-Element-II) (H) -[2106.Maximum-Fruits-Harvested-After-at-Most-K-Steps](https://github.com/wisdompeak/LeetCode/blob/master/Two_Pointers/2106.Maximum-Fruits-Harvested-After-at-Most-K-Steps) (H) -[2141.Maximum-Running-Time-of-N-Computers](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2141.Maximum-Running-Time-of-N-Computers) (M+) -* ``Binary Processing`` -[1483.Kth-Ancestor-of-a-Tree-Node](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1483.Kth-Ancestor-of-a-Tree-Node) (H) -[1922.Count-Good-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1922.Count-Good-Numbers) (M) +[2563.Count-the-Number-of-Fair-Pairs](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2563.Count-the-Number-of-Fair-Pairs) (M+) +[2819.Minimum-Relative-Loss-After-Buying-Chocolates](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2819.Minimum-Relative-Loss-After-Buying-Chocolates) 
(H) +[2972.Count-the-Number-of-Incremovable-Subarrays-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2972.Count-the-Number-of-Incremovable-Subarrays-II) (H-) +* ``Binary Lifting`` +[1483.Kth-Ancestor-of-a-Tree-Node](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1483.Kth-Ancestor-of-a-Tree-Node) (H) +[2277.Closest-Node-to-Path-in-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2277.Closest-Node-to-Path-in-Tree) (H) +[2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2836.Maximize-Value-of-Function-in-a-Ball-Passing-Game) (H) +[2846.Minimum-Edge-Weight-Equilibrium-Queries-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2846.Minimum-Edge-Weight-Equilibrium-Queries-in-a-Tree) (H) +[2851.String-Transformation](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2851.String-Transformation) (H+) +[3534.Path-Existence-Queries-in-a-Graph-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3534.Path-Existence-Queries-in-a-Graph-II) (H) +[3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3553.Minimum-Weighted-Subgraph-With-the-Required-Paths-II) (H) +[3559.Number-of-Ways-to-Assign-Edge-Weights-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3559.Number-of-Ways-to-Assign-Edge-Weights-II) (H-) +[3585.Find-Weighted-Median-Node-in-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3585.Find-Weighted-Median-Node-in-Tree) (H) * ``Binary Search by Value`` -[215.Kth-Largest-Element-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/215.Kth-Largest-Element-in-an-Array) (M) -[287.Find-the-Duplicate-Number](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/287.Find-the-Duplicate-Number) (H-) -[378.Kth-Smallest-Element-in-a-Sorted-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/378.Kth-Smallest-Element-in-a-Sorted-Matrix) (H-) -[373.Find-K-Pairs-with-Smallest-Sums](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums) (H) -[668.Kth-Smallest-Number-in-Multiplication-Table](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/668.Kth-Smallest-Number-in-Multiplication-Table) (H-) -[719.Find-Kth-Smallest-Pair-Distance](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/719.Find-K-th-Smallest-Pair-Distance) (H-) -[1918.Kth-Smallest-Subarray-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1918.Kth-Smallest-Subarray-Sum) (M+) -[2040.Kth-Smallest-Product-of-Two-Sorted-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2040.Kth-Smallest-Product-of-Two-Sorted-Arrays) (H-) -[1439.Find-the-Kth-Smallest-Sum-of-a-Matrix-With-Sorted-Rows](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1439.Find-the-Kth-Smallest-Sum-of-a-Matrix-With-Sorted-Rows) (H) +[410.Split-Array-Largest-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/410.Split-Array-Largest-Sum) (H-) [774.Minimize-Max-Distance-to-Gas-Station](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station) (H) -[786.Kth-Smallest-Prime-Fraction](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/786.K-th%20Smallest-Prime-Fraction) (H-) 
-[793.Preimage-Size-of-Factorial-Zeroes-Function](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/793.Preimage-Size-of-Factorial-Zeroes-Function) (H-) [1011.Capacity-To-Ship-Packages-Within-D-Days](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1011.Capacity-To-Ship-Packages-Within-D-Days) (M) [1060.Missing-Element-in-Sorted-Array](https://github.com/wisdompeak/LeetCode/blob/master/Binary_Search/1060.Missing-Element-in-Sorted-Array) (H) [1102.Path-With-Maximum-Minimum-Value](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1102.Path-With-Maximum-Minimum-Value) (H-) -[1539.Kth-Missing-Positive-Number](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1539.Kth-Missing-Positive-Number) (H-) -[1201.Ugly-Number-III](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1201.Ugly-Number-III) (H-) +[1631.Path-With-Minimum-Effort](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1631.Path-With-Minimum-Effort) (H-) [1231.Divide-Chocolate](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1231.Divide-Chocolate) (M) [1283.Find-the-Smallest-Divisor-Given-a-Threshold](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1283.Find-the-Smallest-Divisor-Given-a-Threshold) (M) [1292.Maximum-Side-Length-of-a-Square-with-Sum-Less-than-or-Equal-to-Threshold](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1292.Maximum-Side-Length-of-a-Square-with-Sum-Less-than-or-Equal-to-Threshold) (H-) @@ -120,16 +136,54 @@ [1891.Cutting-Ribbons](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1891.Cutting-Ribbons) (E) [2064.Minimized-Maximum-of-Products-Distributed-to-Any-Store](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2064.Minimized-Maximum-of-Products-Distributed-to-Any-Store) (M) [2071.Maximum-Number-of-Tasks-You-Can-Assign](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2071.Maximum-Number-of-Tasks-You-Can-Assign) (H) -[2102.Sequentially-Ordinal-Rank-Tracker](https://github.com/wisdompeak/LeetCode/tree/master/Heap/2102.Sequentially-Ordinal-Rank-Tracker) (H-) +[2106.Maximum-Fruits-Harvested-After-at-Most-K-Steps](https://github.com/wisdompeak/LeetCode/blob/master/Two_Pointers/2106.Maximum-Fruits-Harvested-After-at-Most-K-Steps) (H) [2137.Pour-Water-Between-Buckets-to-Make-Water-Levels-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2137.Pour-Water-Between-Buckets-to-Make-Water-Levels-Equal) (M) +[2141.Maximum-Running-Time-of-N-Computers](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2141.Maximum-Running-Time-of-N-Computers) (M+) +[2226.Maximum-Candies-Allocated-to-K-Children](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2226.Maximum-Candies-Allocated-to-K-Children) (M) +[2439.Minimize-Maximum-of-Array](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2439.Minimize-Maximum-of-Array) (H-) +[2517.Maximum-Tastiness-of-Candy-Basket](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2517.Maximum-Tastiness-of-Candy-Basket) (M+) +[2513.Minimize-the-Maximum-of-Two-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2513.Minimize-the-Maximum-of-Two-Arrays) (H) +[2528.Maximize-the-Minimum-Powered-City](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2528.Maximize-the-Minimum-Powered-City) (H-) 
+[2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2557.Maximum-Number-of-Integers-to-Choose-From-a-Range-II) (H-) +[2560.House-Robber-IV](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2560.House-Robber-IV) (H-) +[2594.Minimum-Time-to-Repair-Cars](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2594.Minimum-Time-to-Repair-Cars) (M) +[2604.Minimum-Time-to-Eat-All-Grains](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2604.Minimum-Time-to-Eat-All-Grains) (H-) +[2616.Minimize-the-Maximum-Difference-of-Pairs](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2616.Minimize-the-Maximum-Difference-of-Pairs) (H-) +[2702.Minimum-Operations-to-Make-Numbers-Non-positive](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2702.Minimum-Operations-to-Make-Numbers-Non-positive) (H-) +[2861.Maximum-Number-of-Alloys](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2861.Maximum-Number-of-Alloys) (M+) +[3048.Earliest-Second-to-Mark-Indices-I](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3048.Earliest-Second-to-Mark-Indices-I) (M+) +[3049.Earliest-Second-to-Mark-Indices-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II) (H) +[3097.Shortest-Subarray-With-OR-at-Least-K-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3097.Shortest-Subarray-With-OR-at-Least-K-II) (M) +[3399.Smallest-Substring-With-Identical-Characters-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3399.Smallest-Substring-With-Identical-Characters-II) (H-) +[3449.Maximize-the-Minimum-Game-Score](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3449.Maximize-the-Minimum-Game-Score) (H-) +[3464.Maximize-the-Distance-Between-Points-on-a-Square](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3464.Maximize-the-Distance-Between-Points-on-a-Square) (H) +[3639.Minimum-Time-to-Activate-String](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3639.Minimum-Time-to-Activate-String) (M) +[3677.Count-Binary-Palindromic-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3677.Count-Binary-Palindromic-Numbers) (H-) + * ``Find K-th Element`` +[215.Kth-Largest-Element-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/215.Kth-Largest-Element-in-an-Array) (M) +[287.Find-the-Duplicate-Number](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/287.Find-the-Duplicate-Number) (H-) +[378.Kth-Smallest-Element-in-a-Sorted-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/378.Kth-Smallest-Element-in-a-Sorted-Matrix) (H-) +[373.Find-K-Pairs-with-Smallest-Sums](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums) (H) +[668.Kth-Smallest-Number-in-Multiplication-Table](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/668.Kth-Smallest-Number-in-Multiplication-Table) (H-) +[719.Find-Kth-Smallest-Pair-Distance](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/719.Find-K-th-Smallest-Pair-Distance) (H-) +[1918.Kth-Smallest-Subarray-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1918.Kth-Smallest-Subarray-Sum) (M+) 
+[2040.Kth-Smallest-Product-of-Two-Sorted-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2040.Kth-Smallest-Product-of-Two-Sorted-Arrays) (H-) +[1439.Find-the-Kth-Smallest-Sum-of-a-Matrix-With-Sorted-Rows](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1439.Find-the-Kth-Smallest-Sum-of-a-Matrix-With-Sorted-Rows) (H) +[786.Kth-Smallest-Prime-Fraction](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/786.K-th%20Smallest-Prime-Fraction) (H-) +[793.Preimage-Size-of-Factorial-Zeroes-Function](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/793.Preimage-Size-of-Factorial-Zeroes-Function) (H-) +[1201.Ugly-Number-III](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1201.Ugly-Number-III) (H-) +[1539.Kth-Missing-Positive-Number](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1539.Kth-Missing-Positive-Number) (H-) +[2387.Median-of-a-Row-Wise-Sorted-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2387.Median-of-a-Row-Wise-Sorted-Matrix) (H-) +[3116.Kth-Smallest-Amount-With-Single-Denomination-Combination](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/3116.Kth-Smallest-Amount-With-Single-Denomination-Combination) (H) +[3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits](https://github.com/wisdompeak/LeetCode/tree/master/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits) (H) +[3134.Find-the-Median-of-the-Uniqueness-Array](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/3134.Find-the-Median-of-the-Uniqueness-Array) (H-) -#### [Hash Table](https://github.com/wisdompeak/LeetCode/tree/master/Hash) +#### [Hash Map](https://github.com/wisdompeak/LeetCode/tree/master/Hash) [049.Group-Anagrams](https://github.com/wisdompeak/LeetCode/tree/master/Hash/049.Group-Anagrams) (M+) [149.Max-Points-on-a-Line](https://github.com/wisdompeak/LeetCode/tree/master/Hash/149.Max-Points-on-a-Line) (H) [166.Fraction-to-Recurring-Decimal](https://github.com/wisdompeak/LeetCode/tree/master/Hash/166.Fraction-to-Recurring-Decimal) (M) [170.Two-Sum-III-Data-structure-design](https://github.com/wisdompeak/LeetCode/tree/master/Hash/170.Two-Sum-III-Data-structure-design) (M) [392.Is-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Hash/392.Is-Subsequence) (H-) -[204.Count Primes](https://github.com/wisdompeak/LeetCode/tree/master/Hash/204.Count-Primes) (M) [274.H-Index](https://github.com/wisdompeak/LeetCode/tree/master/Hash/274.H-Index) (H) [325.Maximum-Size-Subarray-Sum-Equals-k](https://github.com/wisdompeak/LeetCode/tree/master/Hash/325.Maximum-Size-Subarray-Sum-Equals-k) (M) [409.Longest-Palindrome](https://github.com/wisdompeak/LeetCode/tree/master/Hash/409.Longest-Palindrome) (M) @@ -145,13 +199,14 @@ [939.Minimum-Area-Rectangle](https://github.com/wisdompeak/LeetCode/tree/master/Hash/939.Minimum-Area-Rectangle) (M+) 982.Triples-with-Bitwise-AND-Equal-To-Zero (M+) (TBD) [1074.Number-of-Submatrices-That-Sum-to-Target](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1074.Number-of-Submatrices-That-Sum-to-Target) (M+) -1224.Maximum-Equal-Frequency (H-) [1487.Making-File-Names-Unique](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1487.Making-File-Names-Unique) (M+) [1573.Number-of-Ways-to-Split-a-String](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1573.Number-of-Ways-to-Split-a-String) (M) 
[2131.Longest-Palindrome-by-Concatenating-Two-Letter-Words](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2131.Longest-Palindrome-by-Concatenating-Two-Letter-Words) (M) +[2198.Number-of-Single-Divisor-Triplets](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2198.Number-of-Single-Divisor-Triplets) (H-) * ``Hash+Prefix`` [525.Contiguous-Array](https://github.com/wisdompeak/LeetCode/tree/master/Hash/525.Contiguous-Array) (M) [930.Binary-Subarrays-With-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Hash/930.Binary-Subarrays-With-Sum) (M) +[1983.Widest-Pair-of-Indices-With-Equal-Range-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum) (M) [1442.Count-Triplets-That-Can-Form-Two-Arrays-of-Equal-XOR](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1442.Count-Triplets-That-Can-Form-Two-Arrays-of-Equal-XOR) (H-) [1524.Number-of-Sub-arrays-With-Odd-Sum ](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1524.Number-of-Sub-arrays-With-Odd-Sum) (M) [974.Subarray-Sums-Divisible-by-K](https://github.com/wisdompeak/LeetCode/tree/master/Hash/974.Subarray-Sums-Divisible-by-K) (M) @@ -160,31 +215,54 @@ [1371.Find-the-Longest-Substring-Containing-Vowels-in-Even-Counts](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1371.Find-the-Longest-Substring-Containing-Vowels-in-Even-Counts) (H-) [1542.Find-Longest-Awesome-Substring](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1542.Find-Longest-Awesome-Substring) (H-) [1915.Number-of-Wonderful-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1915.Number-of-Wonderful-Substrings) (M+) -[1983.Widest-Pair-of-Indices-With-Equal-Range-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Hash/1983.Widest-Pair-of-Indices-With-Equal-Range-Sum) (M+) [2025.Maximum-Number-of-Ways-to-Partition-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2025.Maximum-Number-of-Ways-to-Partition-an-Array) (H) +[2488.Count-Subarrays-With-Median-K](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2488.Count-Subarrays-With-Median-K) (H-) +[2489.Number-of-Substrings-With-Fixed-Ratio](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2489.Number-of-Substrings-With-Fixed-Ratio) (H-) +[2588.Count-the-Number-of-Beautiful-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2588.Count-the-Number-of-Beautiful-Subarrays) (M+) +[2845.Count-of-Interesting-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2845.Count-of-Interesting-Subarrays) (M+) +[2875.Minimum-Size-Subarray-in-Infinite-Array](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2875.Minimum-Size-Subarray-in-Infinite-Array) (H-) +[2949.Count-Beautiful-Substrings-II](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2949.Count-Beautiful-Substrings-II) (H-) +[2950.Number-of-Divisible-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Hash/2950.Number-of-Divisible-Substrings) (H-) +[3448.Count-Substrings-Divisible-By-Last-Digit](https://github.com/wisdompeak/LeetCode/tree/master/Hash/3448.Count-Substrings-Divisible-By-Last-Digit) (H-) -#### [Heap](https://github.com/wisdompeak/LeetCode/tree/master/Heap) -[220.Contains-Duplicate-III](https://github.com/wisdompeak/LeetCode/tree/master/Heap/220.Contains-Duplicate-III) (M) -[295.Find-Median-from-Data-Stream](https://github.com/wisdompeak/LeetCode/tree/master/Heap/295.Find-Median-from-Data-Stream) (M) 
-[363.Max-Sum-of-Rectangle-No-Larger-Than-K](https://github.com/wisdompeak/LeetCode/tree/master/Heap/363.Max-Sum-of-Rectangle-No-Larger-Than-K) (H) -[352.Data-Stream-as-Disjoint-Intervals](https://github.com/wisdompeak/LeetCode/tree/master/Heap/352.Data-Stream-as-Disjoint-Intervals) (H) -[480.Sliding-Window-Median](https://github.com/wisdompeak/LeetCode/blob/master/Heap/480.Sliding-Window-Median) (H) -[218.The-Skyline-Problem](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/218.The-Skyline-Problem) (H) +#### [Sorted Container](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container) +[220.Contains-Duplicate-III](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/220.Contains-Duplicate-III) (M) +[363.Max-Sum-of-Rectangle-No-Larger-Than-K](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/363.Max-Sum-of-Rectangle-No-Larger-Than-K) (H) +[352.Data-Stream-as-Disjoint-Intervals](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/352.Data-Stream-as-Disjoint-Intervals) (H) +[480.Sliding-Window-Median](https://github.com/wisdompeak/LeetCode/blob/master/Sorted_Container/480.Sliding-Window-Median) (H) [699.Falling-Squares](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/699.Falling-Squares) (H) -[715.Range-Module](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/715.Range-Module) (H) -[729.My-Calendar-I](https://github.com/wisdompeak/LeetCode/tree/master/Heap/729.My-Calendar-I) (M) -[975.Odd-Even-Jump](https://github.com/wisdompeak/LeetCode/tree/master/Heap/975.Odd-Even-Jump) (H-) -[632.Smallest-Range-Covering-Elements-from-K-Lists](https://github.com/wisdompeak/LeetCode/tree/master/Heap/632.Smallest-Range-Covering-Elements-from-K-Lists) (H-) -[1675.Minimize-Deviation-in-Array](https://github.com/wisdompeak/LeetCode/tree/master/Heap/1675.Minimize-Deviation-in-Array) (H) -[1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Heap/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers) (M) +[729.My-Calendar-I](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/729.My-Calendar-I) (M) +[855.Exam-Room](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/855.Exam-Room) (M+) +[975.Odd-Even-Jump](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/975.Odd-Even-Jump) (H-) +[632.Smallest-Range-Covering-Elements-from-K-Lists](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/632.Smallest-Range-Covering-Elements-from-K-Lists) (H-) +[1675.Minimize-Deviation-in-Array](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1675.Minimize-Deviation-in-Array) (H) +[1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers) (M) 1348.Tweet-Counts-Per-Frequency (H-) -[1606.Find-Servers-That-Handled-Most-Number-of-Requests](https://github.com/wisdompeak/LeetCode/tree/master/Heap/1606.Find-Servers-That-Handled-Most-Number-of-Requests) (M) +[1488.Avoid-Flood-in-The-City](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1488.Avoid-Flood-in-The-City) (H-) +[1606.Find-Servers-That-Handled-Most-Number-of-Requests](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1606.Find-Servers-That-Handled-Most-Number-of-Requests) (M) 1797.Design Authentication Manager (M) 
-[1825.Finding-MK-Average](https://github.com/wisdompeak/LeetCode/tree/master/Heap/1825.Finding-MK-Average) (H) -[1847.Closest-Room](https://github.com/wisdompeak/LeetCode/tree/master/Heap/1847.Closest-Room) (M+) -[1912.Design-Movie-Rental-System](https://github.com/wisdompeak/LeetCode/tree/master/Heap/1912.Design-Movie-Rental-System) (M+) +[1847.Closest-Room](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1847.Closest-Room) (M+) +[1912.Design-Movie-Rental-System](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1912.Design-Movie-Rental-System) (M+) 2034.Stock Price Fluctuation (M) [2071.Maximum-Number-of-Tasks-You-Can-Assign](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/2071.Maximum-Number-of-Tasks-You-Can-Assign) (H) +[2612.Minimum-Reverse-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2612.Minimum-Reverse-Operations) (H) +[2736.Maximum-Sum-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2736.Maximum-Sum-Queries) (H) +* ``Dual Multiset`` +[295.Find-Median-from-Data-Stream](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/295.Find-Median-from-Data-Stream) (M) +[1825.Finding-MK-Average](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/1825.Finding-MK-Average) (H) +[2653.Sliding-Subarray-Beauty](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2653.Sliding-Subarray-Beauty) (M+) +[3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II) (H-) +* ``Maintain intervals`` +[715.Range-Module](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/715.Range-Module) (H) +[2213.Longest-Substring-of-One-Repeating-Character](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character) (H) +[2276.Count-Integers-in-Intervals](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2276.Count-Integers-in-Intervals) (H-) +[2382.Maximum-Segment-Sum-After-Removals](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals) (M+) +* ``Sorted_Container w/ monotonic mapping values`` +[2940.Find-Building-Where-Alice-and-Bob-Can-Meet](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet) (H) +[2926.Maximum-Balanced-Subsequence-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2926.Maximum-Balanced-Subsequence-Sum) (H) +[2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I) (H) +[2945.Find-Maximum-Non-decreasing-Array-Length](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length) (H) +[3672.Sum-of-Weighted-Modes-in-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays) (M+) #### [Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree) [144.Binary-Tree-Preorder-Traversal](https://github.com/wisdompeak/LeetCode/tree/master/Tree/144.Binary-Tree-Preorder-Traversal) (M+) @@ -216,7 +294,6 @@ 558.Quad-Tree-Intersection (M+) [662.Maximum-Width-of-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/662.Maximum-Width-of-Binary-Tree) (H-) 
742.Closest-Leaf-in-a-Binary-Tree (H) -834.Sum-of-Distances-in-Tree (H) [863.All-Nodes-Distance-K-in-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/863.All-Nodes-Distance-K-in-Binary-Tree) (H-) [958.Check-Completeness-of-a-Binary-Tree](https://github.com/wisdompeak/LeetCode/blob/master/Tree/954.Check-Completeness-of-a-Binary-Tree/) (M+) 1339. Maximum-Product-of-Splitted-Binary-Tree (TBD) @@ -225,11 +302,25 @@ [1666.Change-the-Root-of-a-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1666.Change-the-Root-of-a-Binary-Tree) (H-) [1932.Merge-BSTs-to-Create-Single-BST](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1932.Merge-BSTs-to-Create-Single-BST) (H) [2003.Smallest-Missing-Genetic-Value-in-Each-Subtree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2003.Smallest-Missing-Genetic-Value-in-Each-Subtree) (H) +[2445.Number-of-Nodes-With-Value-One](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2445.Number-of-Nodes-With-Value-One) (M+) +* ``Regular DFS`` +[2322.Minimum-Score-After-Removals-on-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2322.Minimum-Score-After-Removals-on-a-Tree) (H-) +[2313.Minimum-Flips-in-Binary-Tree-to-Get-Result](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result) (H) +[2467.Most-Profitable-Path-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2467.Most-Profitable-Path-in-a-Tree) (M+) +[2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries) (M+) +[2646.Minimize-the-Total-Price-of-the-Trips](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2646.Minimize-the-Total-Price-of-the-Trips) (M+) +[2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes) (H-) +[2925.Maximum-Score-After-Applying-Operations-on-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree) (M) +[2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes) (H-) * ``Path in a tree`` [543.Diameter-of-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/543.Diameter-of-Binary-Tree) (M) [124.Binary-Tree-Maximum-Path-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Tree/124.Binary-Tree-Maximum-Path-Sum) (M) [687.Longest-Univalue-Path](https://github.com/wisdompeak/LeetCode/tree/master/Tree/687.Longest-Univalue-Path) (M+) +[1522.Diameter-of-N-Ary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1522.Diameter-of-N-Ary-Tree) (M) [2049.Count-Nodes-With-the-Highest-Score](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2049.Count-Nodes-With-the-Highest-Score) (M+) +[2246.Longest-Path-With-Different-Adjacent-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2246.Longest-Path-With-Different-Adjacent-Characters) (M+) +[2538.Difference-Between-Maximum-and-Minimum-Price-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum) (H) +[3203.Find-Minimum-Diameter-After-Merging-Two-Trees](https://github.com/wisdompeak/LeetCode/tree/master/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees) (H-) * ``Serialization & Hashing`` 
[297.Serialize-and-Deserialize-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/297.Serialize-and-Deserialize-Binary-Tree) (H-) [652.Find-Duplicate-Subtrees](https://github.com/wisdompeak/LeetCode/tree/master/Tree/652.Find-Duplicate-Subtrees) (H) @@ -254,37 +345,49 @@ [1650.Lowest-Common-Ancestor-of-a-Binary-Tree-III](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1650.Lowest-Common-Ancestor-of-a-Binary-Tree-III) (M) [1740.Find-Distance-in-a-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1740.Find-Distance-in-a-Binary-Tree) (H) [2096.Step-By-Step-Directions-From-a-Binary-Tree-Node-to-Another](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2096.Step-By-Step-Directions-From-a-Binary-Tree-Node-to-Another) (M+) +[2509.Cycle-Length-Queries-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2509.Cycle-Length-Queries-in-a-Tree) (M) * ``N-ary Tree`` [428.Serialize-and-Deserialize-N-ary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/428.Serialize-and-Deserialize-N-ary-Tree) (H) [431.Encode-N-ary-Tree-to-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/431.Encode-N-ary-Tree-to-Binary-Tree) (H-) [1516.Move-Sub-Tree-of-N-Ary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1516.Move-Sub-Tree-of-N-Ary-Tree) (H-) +* ``Re-Root`` +[834.Sum-of-Distances-in-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/834.Sum-of-Distances-in-Tree) (H) +[2581.Count-Number-of-Possible-Root-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2581.Count-Number-of-Possible-Root-Nodes) (H) +[2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable](https://github.com/wisdompeak/LeetCode/tree/master/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable) (H-) * ``Tree-like but not a tree`` [823](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/823.Binary-Trees-With-Factors), [1902](https://github.com/wisdompeak/LeetCode/tree/master/Tree/1902.Depth-of-BST-Given-Insertion-Order), #### [Segment Tree](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/) -* ``Basics`` [307.Range-Sum-Query-Mutable](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/307.Range-Sum-Query-Mutable/) (H-) [1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array) (H-) [1649.Create-Sorted-Array-through-Instructions](https://github.com/wisdompeak/LeetCode/tree/master/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions) (H-) [1157.Online-Majority-Element-In-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1157.Online-Majority-Element-In-Subarray) (H) -* ``Lazy Tag`` [370.Range-Addition](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/370.Range-Addition) (H) [218.The-Skyline-Problem](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/218.The-Skyline-Problem) (H+) [699.Falling-Squares](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/699.Falling-Squares) (H) -* ``Others`` [715.Range-Module](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/715.Range-Module) (H) +[2286.Booking-Concert-Tickets-in-Groups](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups) (H-) +[2407.Longest-Increasing-Subsequence-II](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2407.Longest-Increasing-Subsequence-II)
(H-) +[2569.Handling-Sum-Queries-After-Update](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2569.Handling-Sum-Queries-After-Update) (H) +[2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I) (H) +[2916.Subarrays-Distinct-Element-Sum-of-Squares-II](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2916.Subarrays-Distinct-Element-Sum-of-Squares-II) (H+) +[3072.Distribute-Elements-Into-Two-Arrays-II](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II) (H-) +[3161.Block-Placement-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/3161.Block-Placement-Queries) (H) +[3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements) (H) +[3187.Peaks-in-Array](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/3187.Peaks-in-Array) (M+) +[3261.Count-Substrings-That-Satisfy-K-Constraint-II](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II) (H-) #### [Binary Indexed Tree] [307.Range-Sum-Query-Mutable](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/307.Range-Sum-Query-Mutable/) (M) [1649.Create-Sorted-Array-through-Instructions](https://github.com/wisdompeak/LeetCode/tree/master/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions) (H) [2031.Count-Subarrays-With-More-Ones-Than-Zeros](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2031.Count-Subarrays-With-More-Ones-Than-Zeros) (H) [2179.Count-Good-Triplets-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2179.Count-Good-Triplets-in-an-Array) (H) +[2659.Make-Array-Empty](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/2659.Make-Array-Empty) (H) +[3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II) (H-) +[3671.Sum-of-Beautiful-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/3671.Sum-of-Beautiful-Subsequences) (H+) #### [Design](https://github.com/wisdompeak/LeetCode/tree/master/Design) -[146.LRU-Cache](https://github.com/wisdompeak/LeetCode/tree/master/Design/146.LRU-Cache) (H-) -[460.LFU Cache](https://github.com/wisdompeak/LeetCode/tree/master/Design/460.LFU-Cache) (H) -[432.All-O-one-Data-Structure](https://github.com/wisdompeak/LeetCode/tree/master/Design/432.All-O-one-Data-Structure) (H) [380.Insert-Delete-GetRandom-O(1)](https://github.com/wisdompeak/LeetCode/tree/master/Design/380.Insert-Delete-GetRandom-O-1/) (M+) [381.Insert-Delete-GetRandom-O1-Duplicates-allowed](https://github.com/wisdompeak/LeetCode/tree/master/Design/381.Insert-Delete-GetRandom-O1-Duplicates-allowed) (H-) [716.Max-Stack](https://github.com/wisdompeak/LeetCode/tree/master/Design/716.Max-Stack) (M+) @@ -301,6 +404,12 @@ [1622.Fancy-Sequence](https://github.com/wisdompeak/LeetCode/tree/master/Design/1622.Fancy-Sequence) (H+) [1801.Number-of-Orders-in-the-Backlog](https://github.com/wisdompeak/LeetCode/tree/master/Design/1801.Number-of-Orders-in-the-Backlog) (M+) [2166.Design-Bitset](https://github.com/wisdompeak/LeetCode/tree/master/Design/2166.Design-Bitset) (M+) +* ``Linked List``
+[146.LRU-Cache](https://github.com/wisdompeak/LeetCode/tree/master/Design/146.LRU-Cache) (H-) +[460.LFU Cache](https://github.com/wisdompeak/LeetCode/tree/master/Design/460.LFU-Cache) (H) +[432.All-O-one-Data-Structure](https://github.com/wisdompeak/LeetCode/tree/master/Design/432.All-O-one-Data-Structure) (H) +[2289.Steps-to-Make-Array-Non-decreasing](https://github.com/wisdompeak/LeetCode/tree/master/Design/2289.Steps-to-Make-Array-Non-decreasing) (H) +[2296.Design-a-Text-Editor](https://github.com/wisdompeak/LeetCode/tree/master/Design/2296.Design-a-Text-Editor) (M+) #### [Stack](https://github.com/wisdompeak/LeetCode/tree/master/Stack) [032.Longest-Valid-Parentheses](https://github.com/wisdompeak/LeetCode/tree/master/Stack/032.Longest-Valid-Parentheses) (H) @@ -318,29 +427,44 @@ [1209.Remove-All-Adjacent-Duplicates-in-String-II](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1209.Remove-All-Adjacent-Duplicates-in-String-II) (M+) [1586.Binary-Search-Tree-Iterator-II](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1586.Binary-Search-Tree-Iterator-II) (H) [2197.Replace-Non-Coprime-Numbers-in-Array](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2197.Replace-Non-Coprime-Numbers-in-Array) (H-) -* ``monotonic stack`` +[2296.Design-a-Text-Editor](https://github.com/wisdompeak/LeetCode/tree/master/Design/2296.Design-a-Text-Editor) (M+) +[2751.Robot-Collisions](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2751.Robot-Collisions) (M+) +[2764.is-Array-a-Preorder-of-Some-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2764.is-Array-a-Preorder-of-Some-Binary-Tree) (M+) +* ``monotonic stack: next greater / smaller`` [042.Trapping-Rain-Water](https://github.com/wisdompeak/LeetCode/tree/master/Others/042.Trapping-Rain-Water) (H) -[084.Largest-Rectangle-in-Histogram](https://github.com/wisdompeak/LeetCode/tree/master/Stack/084.Largest-Rectangle-in-Histogram) (H) -[085.Maximal-Rectangle](https://github.com/wisdompeak/LeetCode/tree/master/Stack/085.Maximal-Rectangle) (H-) [255.Verify-Preorder-Sequence-in-Binary-Search-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Tree/255.Verify-Preorder-Sequence-in-Binary-Search-Tree) (H) [496.Next-Greater-Element-I](https://github.com/wisdompeak/LeetCode/tree/master/Stack/496.Next-Greater-Element-I) (H-) [503.Next-Greater-Element-II](https://github.com/wisdompeak/LeetCode/blob/master/Stack/503.Next-Greater-Element-II) (H-) -[221.Maximal-Square](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/221.Maximal-Square) (H-) [654.Maximum-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Stack/654.Maximum-Binary-Tree) (H) [739.Daily-Temperatures](https://github.com/wisdompeak/LeetCode/tree/master/Stack/739.Daily-Temperatures) (H-) [768.Max-Chunks-To-Make-Sorted-II](https://github.com/wisdompeak/LeetCode/tree/master/Stack/768.Max-Chunks-To-Make-Sorted-II) (H-) [901.Online-Stock-Span](https://github.com/wisdompeak/LeetCode/tree/master/Stack/901.Online-Stock-Span) (H-) -[907.Sum-of-Subarray-Minimums](https://github.com/wisdompeak/LeetCode/tree/master/Stack/907.Sum-of-Subarray-Minimums) (H-) +[907.Sum-of-Subarray-Minimums](https://github.com/wisdompeak/LeetCode/tree/master/Stack/907.Sum-of-Subarray-Minimums) (H-) [1856.Maximum-Subarray-Min-Product](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1856.Maximum-Subarray-Min-Product) (M+) [2104.Sum-of-Subarray-Ranges](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2104.Sum-of-Subarray-Ranges) (H-) 
-[962.Maximum-Width-Ramp](https://github.com/wisdompeak/LeetCode/tree/master/Stack/962.Maximum-Width-Ramp) (H) [1019.Next-Greater-Node-In-Linked-List](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1019.Next-Greater-Node-In-Linked-List) (M) [1063.Number-of-Valid-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1063.Number-of-Valid-Subarrays) (M+) -[1124.Longest-Well-Performing-Interval](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1124.Longest-Well-Performing-Interval) (H) +[1124.Longest-Well-Performing-Interval](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1124.Longest-Well-Performing-Interval) (H) [1130.Minimum-Cost-Tree-From-Leaf-Values](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1130.Minimum-Cost-Tree-From-Leaf-Values) (H) -[1944.Number-of-Visible-People-in-a-Queue](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1944.Number-of-Visible-People-in-a-Queue) (H) [1950.Maximum-of-Minimum-Values-in-All-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1950.Maximum-of-Minimum-Values-in-All-Subarrays) (H-) [1966.Binary-Searchable-Numbers-in-an-Unsorted-Array](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1966.Binary-Searchable-Numbers-in-an-Unsorted-Array) (M+) +[2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String) (H-) +[2454.Next-Greater-Element-IV](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2454.Next-Greater-Element-IV) (H-) +[3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum](https://github.com/wisdompeak/LeetCode/tree/master/Stack/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum) (M) +[3676.Count-Bowl-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Stack/3676.Count-Bowl-Subarrays) (M+) +* ``monotonic stack: other usages`` +[084.Largest-Rectangle-in-Histogram](https://github.com/wisdompeak/LeetCode/tree/master/Stack/084.Largest-Rectangle-in-Histogram) (H) +[2334.Subarray-With-Elements-Greater-Than-Varying-Threshold](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold) (M+) +[085.Maximal-Rectangle](https://github.com/wisdompeak/LeetCode/tree/master/Stack/085.Maximal-Rectangle) (H-) +[2866.Beautiful-Towers-II](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2866.Beautiful-Towers-II) (H) +[1504.Count-Submatrices-With-All-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1504.Count-Submatrices-With-All-Ones) (H) +[221.Maximal-Square](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/221.Maximal-Square) (H-) +[962.Maximum-Width-Ramp](https://github.com/wisdompeak/LeetCode/tree/master/Stack/962.Maximum-Width-Ramp) (H) +[2863.Maximum-Length-of-Semi-Decreasing-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays) (H) +[1944.Number-of-Visible-People-in-a-Queue](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1944.Number-of-Visible-People-in-a-Queue) (H) +[2282.Number-of-People-That-Can-Be-Seen-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid) (H) +[2289.Steps-to-Make-Array-Non-decreasing](https://github.com/wisdompeak/LeetCode/tree/master/Design/2289.Steps-to-Make-Array-Non-decreasing) (H) 
+[2355.Maximum-Number-of-Books-You-Can-Take](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2355.Maximum-Number-of-Books-You-Can-Take) (H) * ``form smallest sequence`` [402.Remove-K-Digits](https://github.com/wisdompeak/LeetCode/tree/master/Stack/402.Remove-K-Digits) (H-) [1673.Find-the-Most-Competitive-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1673.Find-the-Most-Competitive-Subsequence) (M) @@ -371,11 +495,14 @@ [1562.Find-Latest-Group-of-Size-M](https://github.com/wisdompeak/LeetCode/tree/master/Deque/1562.Find-Latest-Group-of-Size-M) (H) [1696.Jump-Game-VI](https://github.com/wisdompeak/LeetCode/tree/master/Deque/1696.Jump-Game-VI) (M+) [1776.Car-Fleet-II](https://github.com/wisdompeak/LeetCode/tree/master/Deque/1776.Car-Fleet-II) (H) +[2398.Maximum-Number-of-Robots-Within-Budget](https://github.com/wisdompeak/LeetCode/tree/master/Deque/2398.Maximum-Number-of-Robots-Within-Budget) (H-) +[2762.Continuous-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Deque/2762.Continuous-Subarrays) (M+) +[2969.Minimum-Number-of-Coins-for-Fruits-II](https://github.com/wisdompeak/LeetCode/tree/master/Deque/2969.Minimum-Number-of-Coins-for-Fruits-II) (H-) +[3578.Count-Partitions-With-Max-Min-Difference-at-Most-K](https://github.com/wisdompeak/LeetCode/tree/master/Deque/3578.Count-Partitions-With-Max-Min-Difference-at-Most-K) (H-) #### [Priority Queue](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue) [004.Median-of-Two-Sorted-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/004.Median-of-Two-Sorted-Arrays) (H) -[642.Design-Search-Autocomplete-System](https://github.com/wisdompeak/LeetCode/tree/master/Design/642.Design-Search-Autocomplete-System) (M+) -[774.Minimize-Max-Distance-to-Gas-Station](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station) (H) +[373.Find-K-Pairs-with-Smallest-Sums](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/373.Find-K-Pairs-with-Smallest-Sums) (H-) [871.Minimum-Number-of-Refueling-Stops](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/871.Minimum-Number-of-Refueling-Stops) (H-) [1057.Campus-Bikes](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1057.Campus-Bikes) (H-) [1167.Minimum-Cost-to-Connect-Sticks](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1167.Minimum-Cost-to-Connect-Sticks) (H-) @@ -383,18 +510,32 @@ [1642.Furthest-Building-You-Can-Reach](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1642.Furthest-Building-You-Can-Reach) (H-) [1705.Maximum-Number-of-Eaten-Apples](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1705.Maximum-Number-of-Eaten-Apples) (M+) [1792.Maximum-Average-Pass-Ratio](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1792.Maximum-Average-Pass-Ratio) (M+) +[2263.Make-Array-Non-decreasing-or-Non-increasing](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2263.Make-Array-Non-decreasing-or-Non-increasing) (H) +[2386.Find-the-K-Sum-of-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2386.Find-the-K-Sum-of-an-Array) (H+) +[2931.Maximum-Spending-After-Buying-Items](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2931.Maximum-Spending-After-Buying-Items) (M) +* ``Regret Greedy`` +[630.Course-Schedule-III](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/630.Course-Schedule-III) (H)
+[774.Minimize-Max-Distance-to-Gas-Station](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/774.Minimize-Max-Distance-to-Gas-Station) (H) +[2599.Make-the-Prefix-Sum-Non-negative](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2599.Make-the-Prefix-Sum-Non-negative) (H-) +[3049.Earliest-Second-to-Mark-Indices-II](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/3049.Earliest-Second-to-Mark-Indices-II) (H) +[3645.Maximum-Total-from-Optimal-Activation-Order](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/3645.Maximum-Total-from-Optimal-Activation-Order) (H-) +* ``Dual PQ`` [1801.Number-of-Orders-in-the-Backlog](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1801.Number-of-Orders-in-the-Backlog) (M) [1882.Process-Tasks-Using-Servers](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1882.Process-Tasks-Using-Servers) (H) [1942.The-Number-of-the-Smallest-Unoccupied-Chair](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1942.The-Number-of-the-Smallest-Unoccupied-Chair) (M+) -[2102.Sequentially-Ordinal-Rank-Tracker](https://github.com/wisdompeak/LeetCode/tree/master/Heap/2102.Sequentially-Ordinal-Rank-Tracker) (H-) +[2102.Sequentially-Ordinal-Rank-Tracker](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker) (H-) +[2402.Meeting-Rooms-III](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2402.Meeting-Rooms-III) (M+) +[2653.Sliding-Subarray-Beauty](https://github.com/wisdompeak/LeetCode/tree/master/Sorted_Container/2653.Sliding-Subarray-Beauty) (M+) * ``Sort+PQ`` +[253.Meeting-Rooms-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/253.Meeting-Rooms-II) (M+) [502.IPO](https://github.com/wisdompeak/LeetCode/blob/master/Priority_Queue/502.IPO/) (M+) -[630.Course-Schedule-III](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/630.Course-Schedule-III) (H) [857.Minimum-Cost-to-Hire-K-Workers](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/857.Minimum-Cost-to-Hire-K-Workers) (H) [1353.Maximum-Number-of-Events-That-Can-Be-Attended](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1353.Maximum-Number-of-Events-That-Can-Be-Attended) (H-) [1383.Maximum-Performance-of-a-Team](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1383.Maximum-Performance-of-a-Team) (M+) [1834.Single-Threaded-CPU](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1834.Single-Threaded-CPU) (M) [1851.Minimum-Interval-to-Include-Each-Query](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1851.Minimum-Interval-to-Include-Each-Query) (H) +[2406.Divide-Intervals-Into-Minimum-Number-of-Groups](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2406.Divide-Intervals-Into-Minimum-Number-of-Groups) (M+) +[2542.Maximum-Subsequence-Score](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2542.Maximum-Subsequence-Score) (M+) * ``Arrangement with Stride`` [767.Reorganize-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/767.Reorganize-String) (M+) [1054.Distant-Barcodes](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1054.Distant-Barcodes) (M+) @@ -403,6 +544,7 @@ [984.String-Without-AAA-or-BBB](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/984.String-Without-AAA-or-BBB) (M+) 
[1405.Longest-Happy-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1405.Longest-Happy-String) (H-) [1953.Maximum-Number-of-Weeks-for-Which-You-Can-Work](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/1953.Maximum-Number-of-Weeks-for-Which-You-Can-Work) (M+) +[2335.Minimum-Amount-of-Time-to-Fill-Cups](https://github.com/wisdompeak/LeetCode/tree/master/Priority_Queue/2335.Minimum-Amount-of-Time-to-Fill-Cups) (M+) #### [DFS](https://github.com/wisdompeak/LeetCode/tree/master/DFS) [037.Sudoku-Solver](https://github.com/wisdompeak/LeetCode/tree/master/DFS/037.Sudoku-Solver) (M+) @@ -421,11 +563,13 @@ [959.Regions-Cut-By-Slashes](https://github.com/wisdompeak/LeetCode/tree/master/DFS/959.Regions-Cut-By-Slashes) (M+) [1306.Jump-Game-III](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1306.Jump-Game-III) (M) [1718.Construct-the-Lexicographically-Largest-Valid-Sequence](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1718.Construct-the-Lexicographically-Largest-Valid-Sequence) (H-) -[1723.Find-Minimum-Time-to-Finish-All-Jobs](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1723.Find-Minimum-Time-to-Finish-All-Jobs) (H-) [1766.Tree-of-Coprimes](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1766.Tree-of-Coprimes) (H-) [2014.Longest-Subsequence-Repeated-k-Times](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2014.Longest-Subsequence-Repeated-k-Times) (H) [2056.Number-of-Valid-Move-Combinations-On-Chessboard](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2056.Number-of-Valid-Move-Combinations-On-Chessboard) (H) [2065.Maximum-Path-Quality-of-a-Graph](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2065.Maximum-Path-Quality-of-a-Graph) (M) +[2850.Minimum-Moves-to-Spread-Stones-Over-Grid](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2850.Minimum-Moves-to-Spread-Stones-Over-Grid) (M) +[3459.Length-of-Longest-V-Shaped-Diagonal-Segment](https://github.com/wisdompeak/LeetCode/tree/master/DFS/3459.Length-of-Longest-V-Shaped-Diagonal-Segment) (M+) +[3593.Minimum-Increments-to-Equalize-Leaf-Paths](https://github.com/wisdompeak/LeetCode/tree/master/DFS/3593.Minimum-Increments-to-Equalize-Leaf-Paths) (M+) * ``search in an array`` [090.Subsets-II](https://github.com/wisdompeak/LeetCode/tree/master/DFS/090.Subsets-II) (M+) [301.Remove-Invalid-Parentheses](https://github.com/wisdompeak/LeetCode/tree/master/DFS/301.Remove-Invalid-Parentheses) (H) @@ -436,13 +580,23 @@ [1307.Verbal-Arithmetic-Puzzle](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1307.Verbal-Arithmetic-Puzzle) (H) [1593.Split-a-String-Into-the-Max-Number-of-Unique-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1593.Split-a-String-Into-the-Max-Number-of-Unique-Substrings) (M) [1681.Minimum-Incompatibility](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1681.Minimum-Incompatibility) (H) +[1723.Find-Minimum-Time-to-Finish-All-Jobs](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1723.Find-Minimum-Time-to-Finish-All-Jobs) (H-) +[2305.Fair-Distribution-of-Cookies](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2305.Fair-Distribution-of-Cookies) (H-) +[2597.The-Number-of-Beautiful-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2597.The-Number-of-Beautiful-Subsets) (M+) +[2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2842.Count-K-Subsequences-of-a-String-With-Maximum-Beauty) 
(M+) +[3669.Balanced-K-Factor-Decomposition](https://github.com/wisdompeak/LeetCode/tree/master/DFS/3669.Balanced-K-Factor-Decomposition) (M) * ``memoization`` [329.Longest-Increasing-Path-in-a-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/DFS/329.Longest-Increasing-Path-in-a-Matrix) (M) +[2328.Number-of-Increasing-Paths-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2328.Number-of-Increasing-Paths-in-a-Grid) (M) [638.Shopping-Offers](https://github.com/wisdompeak/LeetCode/tree/master/DFS/638.Shopping-Offers) (M+) [403.Frog-Jump](https://github.com/wisdompeak/LeetCode/tree/master/DFS/403.Frog-Jump) (M+) [546.Remove-Boxes](https://github.com/wisdompeak/LeetCode/tree/master/DFS/546.Remove-Boxes) (H+) [1340.Jump-Game-V](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1340.Jump-Game-V) (M+) -[1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts) (H-) +[1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1815.Maximum-Number-of-Groups-Getting-Fresh-Donuts) (H-) +[2741.Special-Permutations](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2741.Special-Permutations) (M+) +[2746.Decremental-String-Concatenation](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2746.Decremental-String-Concatenation) (H-) +[3213.Construct-String-with-Minimum-Cost](https://github.com/wisdompeak/LeetCode/tree/master/DFS/3213.Construct-String-with-Minimum-Cost) (H-) +[3615.Longest-Palindromic-Path-in-Graph](https://github.com/wisdompeak/LeetCode/tree/master/DFS/3615.Longest-Palindromic-Path-in-Graph) (H-) * ``hidden matrix`` [489.Robot-Room-Cleaner](https://github.com/wisdompeak/LeetCode/blob/master/DFS/489.Robot-Room-Cleaner) (H) [1778.Shortest-Path-in-a-Hidden-Grid](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1778.Shortest-Path-in-a-Hidden-Grid) (H-) @@ -453,6 +607,7 @@ [126.Word-Ladder-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/126.Word-Ladder-II) (M+) [130.Surrounded-Regions](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/130.Surrounded-Regions) (H-) [200.Number-of-Islands](https://github.com/wisdompeak/LeetCode/tree/master/DFS/200.Number-of-Islands) (H-) +[490.The-Maze](https://github.com/wisdompeak/LeetCode/tree/master/BFS/490.The-Maze) (M) [529.Minesweeper](https://github.com/wisdompeak/LeetCode/tree/master/BFS/529.Minesweeper) (M+) [637.Average-of-Levels-in-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/BFS/637.Average-of-Levels-in-Binary-Tree) (M) [675.Cut-Off-Trees-for-Golf-Event](https://github.com/wisdompeak/LeetCode/tree/master/BFS/675.Cut-Off-Trees-for-Golf-Event) (M)
+[2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2493.Divide-Nodes-Into-the-Maximum-Number-of-Groups) (H-) +[2812.Find-the-Safest-Path-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2812.Find-the-Safest-Path-in-a-Grid) (M+) +[3552.Grid-Teleportation-Traversal](https://github.com/wisdompeak/LeetCode/tree/master/BFS/3552.Grid-Teleportation-Traversal) (H-) +[3629.Minimum-Jumps-to-Reach-End-via-Prime-Teleportation](https://github.com/wisdompeak/LeetCode/tree/master/BFS/3629.Minimum-Jumps-to-Reach-End-via-Prime-Teleportation) (M+) * ``Multi State`` [847.Shortest-Path-Visiting-All-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/BFS/847.Shortest-Path-Visiting-All-Nodes) (H-) [864.Shortest-Path-to-Get-All-Keys](https://github.com/wisdompeak/LeetCode/tree/master/BFS/864.Shortest-Path-to-Get-All-Keys) (H-) [913.Cat-and-Mouse](https://github.com/wisdompeak/LeetCode/tree/master/BFS/913.Cat-and-Mouse) (H+) [1728.Cat-and-Mouse-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1728.Cat-and-Mouse-II) (H+) [1293.Shortest-Path-in-a-Grid-with-Obstacles-Elimination](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1293.Shortest-Path-in-a-Grid-with-Obstacles-Elimination) (H-) -[1928.Minimum-Cost-to-Reach-Destination-in-Time](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1928.Minimum-Cost-to-Reach-Destination-in-Time) (H-) +[1928.Minimum-Cost-to-Reach-Destination-in-Time](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1928.Minimum-Cost-to-Reach-Destination-in-Time) (H-) +[3568.Minimum-Moves-to-Clean-the-Classroom](https://github.com/wisdompeak/LeetCode/tree/master/BFS/3568.Minimum-Moves-to-Clean-the-Classroom) (H-) * ``Topological Sort`` [207.Course-Schedule](https://github.com/wisdompeak/LeetCode/tree/master/BFS/207.Course-Schedule) (H-) [210.Course-Schedule-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/210.Course-Schedule-II) (M) @@ -492,25 +654,36 @@ [1203.Sort-Items-by-Groups-Respecting-Dependencies](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1203.Sort-Items-by-Groups-Respecting-Dependencies) (H) [1462.Course-Schedule-IV](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1462.Course-Schedule-IV) (M) [1591.Strange-Printer-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1591.Strange-Printer-II) (H-) +[1632.Rank-Transform-of-a-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1632.Rank-Transform-of-a-Matrix) (H) [1857.Largest-Color-Value-in-a-Directed-Graph](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1857.Largest-Color-Value-in-a-Directed-Graph) (H-) [2050.Parallel-Courses-III](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2050.Parallel-Courses-III) (M+) [2115.Find-All-Possible-Recipes-from-Given-Supplies](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2115.Find-All-Possible-Recipes-from-Given-Supplies) (M) [2127.Maximum-Employees-to-Be-Invited-to-a-Meeting](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2127.Maximum-Employees-to-Be-Invited-to-a-Meeting) (H) +[2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2192.All-Ancestors-of-a-Node-in-a-Directed-Acyclic-Graph) (M) +[2204.Distance-to-a-Cycle-in-Undirected-Graph](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2204.Distance-to-a-Cycle-in-Undirected-Graph) (M)
+[2392.Build-a-Matrix-With-Conditions](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2392.Build-a-Matrix-With-Conditions) (M+) +[2440.Create-Components-With-Same-Value](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2440.Create-Components-With-Same-Value) (H-) +[2603.Collect-Coins-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2603.Collect-Coins-in-a-Tree) (H-) * ``Dijkstra (BFS+PQ)`` -[743.Network-Delay-Time](https://github.com/wisdompeak/LeetCode/tree/master/BFS/743.Network-Delay-Time) (H) +[743.Network-Delay-Time](https://github.com/wisdompeak/LeetCode/tree/master/BFS/743.Network-Delay-Time) (H-) [407.Trapping-Rain-Water-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/407.Trapping-Rain-Water-II) (H) [778.Swim-in-Rising-Water](https://github.com/wisdompeak/LeetCode/tree/master/BFS/778.Swim-in-Rising-Water) (H) +[2503.Maximum-Number-of-Points-From-Grid-Queries](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2503.Maximum-Number-of-Points-From-Grid-Queries) (H-) [505.The-Maze-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/505.The-Maze-II) (H-) [499.The-Maze-III](https://github.com/wisdompeak/LeetCode/tree/master/BFS/499.The-Maze-III) (H) -[787.Cheapest-Flights-Within-K-Stops](https://github.com/wisdompeak/LeetCode/tree/master/Graph/787.Cheapest-Flights-Within-K-Stops) (H) [882.Reachable-Nodes-In-Subdivided-Graph](https://github.com/wisdompeak/LeetCode/tree/master/BFS/882.Reachable-Nodes-In-Subdivided-Graph ) (H) +[1102.Path-With-Maximum-Minimum-Value](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1102.Path-With-Maximum-Minimum-Value) (H-) [1368.Minimum-Cost-to-Make-at-Least-One-Valid-Path-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1368.Minimum-Cost-to-Make-at-Least-One-Valid-Path-in-a-Grid) (H) [1514.Path-with-Maximum-Probability](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1514.Path-with-Maximum-Probability) (H) [1786.Number-of-Restricted-Paths-From-First-to-Last-Node](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1786.Number-of-Restricted-Paths-From-First-to-Last-Node) (M+) [1810.Minimum-Path-Cost-in-a-Hidden-Grid](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1810.Minimum-Path-Cost-in-a-Hidden-Grid) (M+) [1976.Number-of-Ways-to-Arrive-at-Destination](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1976.Number-of-Ways-to-Arrive-at-Destination) (M+) [2093.Minimum-Cost-to-Reach-City-With-Discounts](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2093.Minimum-Cost-to-Reach-City-With-Discounts) (H-) +[2714.Find-Shortest-Path-with-K-Hops](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2714.Find-Shortest-Path-with-K-Hops) (M+) [2203.Minimum-Weighted-Subgraph-With-the-Required-Paths](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2203.Minimum-Weighted-Subgraph-With-the-Required-Paths) (H-) +[2473.Minimum-Cost-to-Buy-Apples](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2473.Minimum-Cost-to-Buy-Apples) (M) +[3594.Minimum-Time-to-Transport-All-Individuals](https://github.com/wisdompeak/LeetCode/tree/master/BFS/3594.Minimum-Time-to-Transport-All-Individuals) (H) +[3604.Minimum-Time-to-Reach-Destination-in-Directed-Graph](https://github.com/wisdompeak/LeetCode/tree/master/BFS/3604.Minimum-Time-to-Reach-Destination-in-Directed-Graph) (M+) * ``Dijkstra (for Bipartite Graph)`` [1066.Campus-Bikes-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1066.Campus-Bikes-II) (H+)
[1879.Minimum-XOR-Sum-of-Two-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1879.Minimum-XOR-Sum-of-Two-Arrays) (H) @@ -521,6 +694,7 @@ [1804.Implement-Trie-II-(Prefix-Tree)](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1804.Implement-Trie-II-(Prefix-Tree)) (M+) [211.Add-and-Search-Word](https://github.com/wisdompeak/LeetCode/tree/master/Trie/211.Add-and-Search-Word) (H-) [472.Concatenated-Words](https://github.com/wisdompeak/LeetCode/tree/master/Trie/472.Concatenated-Words) (H-) +[642.Design-Search-Autocomplete-System](https://github.com/wisdompeak/LeetCode/tree/master/Design/642.Design-Search-Autocomplete-System) (H-) [648.Replace-Words](https://github.com/wisdompeak/LeetCode/tree/master/Trie/648.Replace-Words) (H) [588.Design-In-Memory-File-System](https://github.com/wisdompeak/LeetCode/tree/master/Trie/588.Design-In-Memory-File-System) (H-) [677.Map-Sum-Pairs](https://github.com/wisdompeak/LeetCode/tree/master/Trie/677.Map-Sum-Pairs) (M) @@ -532,11 +706,18 @@ [1268.Search-Suggestions-System](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1268.Search-Suggestions-System) (H-) 1032. Stream of Characters (TBD) [1858.Longest-Word-With-All-Prefixes](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1858.Longest-Word-With-All-Prefixes) (M) +[2416.Sum-of-Prefix-Scores-of-Strings](https://github.com/wisdompeak/LeetCode/tree/master/Trie/2416.Sum-of-Prefix-Scores-of-Strings) (M) +[2977.Minimum-Cost-to-Convert-String-II](https://github.com/wisdompeak/LeetCode/tree/master/Trie/2977.Minimum-Cost-to-Convert-String-II) (H) +[3093.Longest-Common-Suffix-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Trie/3093.Longest-Common-Suffix-Queries) (H-) +[3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits](https://github.com/wisdompeak/LeetCode/tree/master/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits) (H-) * ``Trie and XOR`` [421.Maximum-XOR-of-Two-Numbers-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Trie/421.Maximum-XOR-of-Two-Numbers-in-an-Array) (H-) [1707.Maximum-XOR-With-an-Element-From-Array](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1707.Maximum-XOR-With-an-Element-From-Array) (H-) [1803.Count-Pairs-With-XOR-in-a-Range](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1803.Count-Pairs-With-XOR-in-a-Range) (H) [1938.Maximum-Genetic-Difference-Query](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1938.Maximum-Genetic-Difference-Query) (H) +[2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees](https://github.com/wisdompeak/LeetCode/tree/master/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees) (H) +[2935.Maximum-Strong-Pair-XOR-II](https://github.com/wisdompeak/LeetCode/tree/master/Trie/2935.Maximum-Strong-Pair-XOR-II) (H) +[3632.Subarrays-with-XOR-at-Least-K](https://github.com/wisdompeak/LeetCode/tree/master/Trie/3632.Subarrays-with-XOR-at-Least-K) (H) #### [Linked List](https://github.com/wisdompeak/LeetCode/tree/master/Linked_List) [061.Rotate-List](https://github.com/wisdompeak/LeetCode/tree/master/Linked_List/061.Rotate-List) (M) @@ -569,9 +750,7 @@ [221.Maximal-Square](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/221.Maximal-Square) (H-) [1277.Count-Square-Submatrices-with-All-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1277.Count-Square-Submatrices-with-All-Ones) (M+) 
[600.Non-negative-Integers-without-Consecutive-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/600.Non-negative-Integers-without-Consecutive-Ones) (H) -[656.Coin-Path](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/656.Coin-Path) (H-) -[053.Maximum-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/053.Maximum-Subarray) (E+) -[152.Maximum-Product-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/152.Maximum-Product-Subarray) (M+) +[656.Coin-Path](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/656.Coin-Path) (H-) [818.Race-Car](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/818.Race-Car) (H) [377.Combination-Sum-IV](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/377.Combination-Sum-IV) (M) [837.New-21-Game](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/837.New-21-Game) (H-) @@ -598,9 +777,32 @@ [1955.Count-Number-of-Special-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1955.Count-Number-of-Special-Subsequences) (H-) [2088.Count-Fertile-Pyramids-in-a-Land](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2088.Count-Fertile-Pyramids-in-a-Land) (H-) [2140.Solving-Questions-With-Brainpower](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2140.Solving-Questions-With-Brainpower) (H) +[2189.Number-of-Ways-to-Build-House-of-Cards](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2189.Number-of-Ways-to-Build-House-of-Cards) (H-) +[2218.Maximum-Value-of-K-Coins-From-Piles](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2218.Maximum-Value-of-K-Coins-From-Piles) (H-) +[2222.Number-of-Ways-to-Select-Buildings](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2222.Number-of-Ways-to-Select-Buildings) (M+) +[2312.Selling-Pieces-of-Wood](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2312.Selling-Pieces-of-Wood) (M+) +[2338.Count-the-Number-of-Ideal-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2338.Count-the-Number-of-Ideal-Arrays) (H) +[2431.Maximize-Total-Tastiness-of-Purchased-Fruits](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2431.Maximize-Total-Tastiness-of-Purchased-Fruits) (M+) +[2484.Count-Palindromic-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2484.Count-Palindromic-Subsequences) (H-) +[2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2713.Maximum-Strictly-Inreasing-Cells-in-a-Matrix) (H-) +[2787.Ways-to-Express-an-Integer-as-Sum-of-Powers](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2787.Ways-to-Express-an-Integer-as-Sum-of-Powers) (M+) +[2809.Minimum-Time-to-Make-Array-Sum-At-Most-x](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2809.Minimum-Time-to-Make-Array-Sum-At-Most-x) (H) +[2826.Sorting-Three-Groups](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2826.Sorting-Three-Groups) (M) +[2851.String-Transformation](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2851.String-Transformation) (H+) 
+[2896.Apply-Operations-to-Make-Two-Strings-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2896.Apply-Operations-to-Make-Two-Strings-Equal) (H) +[2979.Most-Expensive-Item-That-Can-Not-Be-Bought](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2979.Most-Expensive-Item-That-Can-Not-Be-Bought) (M+) +[3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3041.Maximize-Consecutive-Elements-in-an-Array-After-Modification) (H-) +[3082.Find-the-Sum-of-the-Power-of-All-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3082.Find-the-Sum-of-the-Power-of-All-Subsequences) (H-) +[3098.Find-the-Sum-of-Subsequence-Powers](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3098.Find-the-Sum-of-Subsequence-Powers) (H) +[3389.Minimum-Operations-to-Make-Character-Frequencies-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3389.Minimum-Operations-to-Make-Character-Frequencies-Equal) (H) +[3654.Minimum-Sum-After-Divisible-Sum-Deletions](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3654.Minimum-Sum-After-Divisible-Sum-Deletions) (H) * ``基本型 I`` [198.House-Robber](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/198.House-Robber) (E) [213.House-Robber-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/213.House-Robber-II) (M+) +[2597.The-Number-of-Beautiful-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/DFS/2597.The-Number-of-Beautiful-Subsets) (H) +[2638.Count-the-Number-of-K-Free-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2638.Count-the-Number-of-K-Free-Subsets) (M+) +[3186.Maximum-Total-Damage-With-Spell-Casting](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3186.Maximum-Total-Damage-With-Spell-Casting) (M+) +[2320.Count-Number-of-Ways-to-Place-Houses](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2320.Count-Number-of-Ways-to-Place-Houses) (M+) [1388.Pizza-With-3n-Slices](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1388.Pizza-With-3n-Slices) (H-) [276.Paint-Fence](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/276.Paint-Fence) (H-) [265.Paint-House-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/265.Paint-House-II) (H) @@ -625,6 +827,11 @@ [1883.Minimum-Skips-to-Arrive-at-Meeting-On-Time](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1883.Minimum-Skips-to-Arrive-at-Meeting-On-Time) (H) [2036.Maximum-Alternating-Subarray-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2036.Maximum-Alternating-Subarray-Sum) (M+) [2143.Choose-Numbers-From-Two-Arrays-in-Range](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2143.Choose-Numbers-From-Two-Arrays-in-Range) (H) +[2318.Number-of-Distinct-Roll-Sequences](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2318.Number-of-Distinct-Roll-Sequences) (H-) +[2361.Minimum-Costs-Using-the-Train-Line](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2361.Minimum-Costs-Using-the-Train-Line) (M+) +[2786.Visit-Array-Positions-to-Maximize-Score](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2786.Visit-Array-Positions-to-Maximize-Score) (M) 
+[3122.Minimum-Number-of-Operations-to-Satisfy-Conditions](https://github.com/wisdompeak/LeetCode/blob/master/Dynamic_Programming/3122.Minimum-Number-of-Operations-to-Satisfy-Conditions) (M+) +[3661.Maximum-Walls-Destroyed-by-Robots](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3661.Maximum-Walls-Destroyed-by-Robots) (H-) * ``基本型 II`` [368.Largest-Divisible-Subset](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/368.Largest-Divisible-Subset) (M+) [300.Longest-Increasing-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/300.Longest-Increasing-Subsequence) (M+) @@ -640,6 +847,16 @@ [1626.Best-Team-With-No-Conflicts](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1626.Best-Team-With-No-Conflicts) (M) [1691.Maximum-Height-by-Stacking-Cuboids](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1691.Maximum-Height-by-Stacking-Cuboids) (H) [2188.Minimum-Time-to-Finish-the-Race](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2188.Minimum-Time-to-Finish-the-Race) (H-) +[2209.Minimum-White-Tiles-After-Covering-With-Carpets](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2209.Minimum-White-Tiles-After-Covering-With-Carpets) (M+) +[2430.Maximum-Deletions-on-a-String](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2430.Maximum-Deletions-on-a-String) (M+) +[2464.Minimum-Subarrays-in-a-Valid-Split](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2464.Minimum-Subarrays-in-a-Valid-Split) (M) +[2522.Partition-String-Into-Substrings-With-Values-at-Most-K](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2522.Partition-String-Into-Substrings-With-Values-at-Most-K) (M+) +[3202.Find-the-Maximum-Length-of-Valid-Subsequence-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3202.Find-the-Maximum-Length-of-Valid-Subsequence-II) (M) + * `Interval` + [1235.Maximum-Profit-in-Job-Scheduling](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1235.Maximum-Profit-in-Job-Scheduling) (H-) + [1751.Maximum-Number-of-Events-That-Can-Be-Attended-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1751.Maximum-Number-of-Events-That-Can-Be-Attended-II) (H) + [2008.Maximum-Earnings-From-Taxi](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2008.Maximum-Earnings-From-Taxi) (M+) + [2830.Maximize-the-Profit-as-the-Salesman](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2830.Maximize-the-Profit-as-the-Salesman) (M) * ``走迷宫型`` [120.Triangle](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/120.Triangle) (E) [174.Dungeon-Game](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/174.Dungeon-Game) (H-) @@ -650,6 +867,9 @@ [1289.Minimum-Falling-Path-Sum-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1289.Minimum-Falling-Path-Sum-II) (M+) [1301.Number-of-Paths-with-Max-Score](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1301.Number-of-Paths-with-Max-Score) (M+) [1594.Maximum-Non-Negative-Product-in-a-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1594.Maximum-Non-Negative-Product-in-a-Matrix) (M) +[2267.Check-if-There-Is-a-Valid-Parentheses-String-Path](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2267.Check-if-There-Is-a-Valid-Parentheses-String-Path) (H-)
+[2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2435.Paths-in-Matrix-Whose-Sum-Is-Divisible-by-K) (M) +[3665.Twisted-Mirror-Path-Count](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3665.Twisted-Mirror-Path-Count) (M) * ``背包型`` [322.Coin-Change](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/322.Coin-Change) (M) [416.Partition-Equal-Subset-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/416.Partition-Equal-Subset-Sum) (M+) @@ -662,6 +882,12 @@ [1049.Last-Stone-Weight-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1049.Last-Stone-Weight-II) (H-) [1449.Form-Largest-Integer-With-Digits-That-Add-up-to-Target](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1449.Form-Largest-Integer-With-Digits-That-Add-up-to-Target) (H-) [1981.Minimize-the-Difference-Between-Target-and-Chosen-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1981.Minimize-the-Difference-Between-Target-and-Chosen-Elements) (M+) +[2291.Maximum-Profit-From-Trading-Stocks](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2291.Maximum-Profit-From-Trading-Stocks) (M) +[2518.Number-of-Great-Partitions](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2518.Number-of-Great-Partitions) (H-) +[2585.Number-of-Ways-to-Earn-Points](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2585.Number-of-Ways-to-Earn-Points) (M) +[2902.Count-of-Sub-Multisets-With-Bounded-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2902.Count-of-Sub-Multisets-With-Bounded-Sum) (H) +[3489.Zero-Array-Transformation-IV](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3489.Zero-Array-Transformation-IV) (H-) +[3592.Inverse-Coin-Change](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3592.Inverse-Coin-Change) (H) * ``键盘型`` [650.2-Keys-Keyboard](https://github.com/wisdompeak/LeetCode/blob/master/Dynamic_Programming/650.2-Keys-Keyboard) (M+) [651.4-Keys-Keyboard](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/651.4-Keys-Keyboard) (M+) @@ -671,15 +897,23 @@ [487.Max-Consecutive-Ones-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/487.Max-Consecutive-Ones-II) (H-) [1186.Maximum-Subarray-Sum-with-One-Deletion](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1186.Maximum-Subarray-Sum-with-One-Deletion) (H-) [1187.Make-Array-Strictly-Increasing](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1187.Make-Array-Strictly-Increasing) (H-) -[1909.Remove-One-Element-to-Make-the-Array-Strictly-Increasing](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1909.Remove-One-Element-to-Make-the-Array-Strictly-Increasing) (H-) +[1909.Remove-One-Element-to-Make-the-Array-Strictly-Increasing](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1909.Remove-One-Element-to-Make-the-Array-Strictly-Increasing) (H-) +[3196.Maximize-Total-Cost-of-Alternating-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3196.Maximize-Total-Cost-of-Alternating-Subarrays) (M) * ``区间型 I`` [132.Palindrome-Partitioning-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/132.Palindrome-Partitioning-II) (H-) 
[410.Split-Array-Largest-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/410.Split-Array-Largest-Sum) (H) [813.Largest-Sum-of-Averages](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/813.Largest-Sum-of-Averages) (H-) [1278.Palindrome-Partitioning-III](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1278.Palindrome-Partitioning-III) (H) -[1335.Minimum-Difficulty-of-a-Job-Schedule](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1335.Minimum-Difficulty-of-a-Job-Schedule) (M+) +[1335.Minimum-Difficulty-of-a-Job-Schedule](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1335.Minimum-Difficulty-of-a-Job-Schedule) (M+) [1478.Allocate-Mailboxes](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1478.Allocate-Mailboxes) (H) [1977.Number-of-Ways-to-Separate-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1977.Number-of-Ways-to-Separate-Numbers) (H) +[2463.Minimum-Total-Distance-Traveled](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2463.Minimum-Total-Distance-Traveled) (M+) +[2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2472.Maximum-Number-of-Non-overlapping-Palindrome-Substrings) (M+) +[2478.Number-of-Beautiful-Partitions](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2478.Number-of-Beautiful-Partitions) (H-) +[2547.Minimum-Cost-to-Split-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2547.Minimum-Cost-to-Split-an-Array) (M) +[2911.Minimum-Changes-to-Make-K-Semi-palindromes](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2911.Minimum-Changes-to-Make-K-Semi-palindromes) (H-) +[3077.Maximum-Strength-of-K-Disjoint-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3077.Maximum-Strength-of-K-Disjoint-Subarrays) (M+) +[3579.Minimum-Steps-to-Convert-String-with-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3579.Minimum-Steps-to-Convert-String-with-Operations) (H-) * ``区间型 II`` [131.Palindrome-Partitioning](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/131.Palindrome-Partitioning) (M+) [312.Burst-Balloons](https://github.com/wisdompeak/LeetCode/tree/master/DFS/312.Burst-Balloons) (H-) @@ -697,7 +931,8 @@ [1682.Longest-Palindromic-Subsequence-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1682.Longest-Palindromic-Subsequence-II) (H) [1690.Stone-Game-VII](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1690.Stone-Game-VII) (H-) [1745.Palindrome-Partitioning-IV](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1745.Palindrome-Partitioning-IV) (M) -[1770.Maximum-Score-from-Performing-Multiplication-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1770.Maximum-Score-from-Performing-Multiplication-Operations) (H-) +[1770.Maximum-Score-from-Performing-Multiplication-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1770.Maximum-Score-from-Performing-Multiplication-Operations) (H-) +[3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3018.Maximum-Number-of-Removal-Queries-That-Can-Be-Processed-I) (H-) * ``双序列型`` 
[010.Regular-Expression-Matching](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/010.Regular-Expression-Matching) (H) [044.Wildcard-Matching](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/044.Wildcard-Matching) (H-) @@ -718,7 +953,6 @@ * ``状态压缩DP`` [465.Optimal-Account-Balancing](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/465.Optimal-Account-Balancing) (H) [691.Stickers-to-Spell-Word](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/691.Stickers-to-Spell-Word) (H) -[943.Find-the-Shortest-Superstring](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/943.Find-the-Shortest-Superstring) (H+) [1125.Smallest-Sufficient-Team](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1125.Smallest-Sufficient-Team) (H) [1349.Maximum-Students-Taking-Exam](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1349.Maximum-Students-Taking-Exam) (H) [1411.Number-of-Ways-to-Paint-N×3-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1411.Number-of-Ways-to-Paint-N%C3%973-Grid) (M) @@ -730,26 +964,44 @@ [1931.Painting-a-Grid-With-Three-Different-Colors](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1931.Painting-a-Grid-With-Three-Different-Colors) (M+) [1994.The-Number-of-Good-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1994.The-Number-of-Good-Subsets) (H) [2184.Number-of-Ways-to-Build-Sturdy-Brick-Wall](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2184.Number-of-Ways-to-Build-Sturdy-Brick-Wall) (H-) +[2403.Minimum-Time-to-Kill-All-Monsters](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2403.Minimum-Time-to-Kill-All-Monsters) (M+) +[2572.Count-the-Number-of-Square-Free-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2572.Count-the-Number-of-Square-Free-Subsets) (H-) * ``枚举集合的子集`` [1494.Parallel-Courses-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1494.Parallel-Courses-II) (H) [1655.Distribute-Repeating-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1655.Distribute-Repeating-Integers) (H) [1986.Minimum-Number-of-Work-Sessions-to-Finish-the-Tasks](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1986.Minimum-Number-of-Work-Sessions-to-Finish-the-Tasks) (M+) - [2152.Minimum-Number-of-Lines-to-Cover-Points](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2152.Minimum-Number-of-Lines-to-Cover-Points) (H-) + [2152.Minimum-Number-of-Lines-to-Cover-Points](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2152.Minimum-Number-of-Lines-to-Cover-Points) (H-) + [3444.Minimum-Increments-for-Target-Multiples-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3444.Minimum-Increments-for-Target-Multiples-in-an-Array) (H-) * ``带权二分图`` -[1066.Campus-Bikes-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1066.Campus-Bikes-II) (H+) -[1595.Minimum-Cost-to-Connect-Two-Groups-of-Points](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1595.Minimum-Cost-to-Connect-Two-Groups-of-Points) (H) -[1879.Minimum-XOR-Sum-of-Two-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1879.Minimum-XOR-Sum-of-Two-Arrays) (H) 
-[1947.Maximum-Compatibility-Score-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1947.Maximum-Compatibility-Score-Sum) (H) -[2172.Maximum-AND-Sum-of-Array](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2172.Maximum-AND-Sum-of-Array) (H) + [1066.Campus-Bikes-II](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1066.Campus-Bikes-II) (H+) + [1595.Minimum-Cost-to-Connect-Two-Groups-of-Points](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1595.Minimum-Cost-to-Connect-Two-Groups-of-Points) (H) + [1879.Minimum-XOR-Sum-of-Two-Arrays](https://github.com/wisdompeak/LeetCode/tree/master/BFS/1879.Minimum-XOR-Sum-of-Two-Arrays) (H) + [1947.Maximum-Compatibility-Score-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1947.Maximum-Compatibility-Score-Sum) (H) + [2172.Maximum-AND-Sum-of-Array](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2172.Maximum-AND-Sum-of-Array) (H) + * ``TSP`` + [943.Find-the-Shortest-Superstring](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/943.Find-the-Shortest-Superstring) (H+) + [2247.Maximum-Cost-of-Trip-With-K-Highways](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2247.Maximum-Cost-of-Trip-With-K-Highways) (H) * ``Catalan`` [096.Unique-Binary-Search-Trees](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/096.Unique-Binary-Search-Trees) (M+) [1259.Handshakes-That-Don't-Cross](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1259.Handshakes-That-Don't-Cross) (M+) * ``Permutation`` [629.K-Inverse-Pairs-Array](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/629.K-Inverse-Pairs-Array) (H) [903.Valid-Permutations-for-DI-Sequence](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/903.Valid-Permutations-for-DI-Sequence) (H) -[1866.Number-of-Ways-to-Rearrange-Sticks-With-K-Sticks-Visible](https://github.com/wisdompeak/LeetCode/tree/master/Math/1866.Number-of-Ways-to-Rearrange-Sticks-With-K-Sticks-Visible) (H) +[1866.Number-of-Ways-to-Rearrange-Sticks-With-K-Sticks-Visible](https://github.com/wisdompeak/LeetCode/tree/master/Math/1866.Number-of-Ways-to-Rearrange-Sticks-With-K-Sticks-Visible) (H) +[3193.Count-the-Number-of-Inversions](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3193.Count-the-Number-of-Inversions) (H) * ``Infer future from current`` -[2044.Count-Number-of-Maximum-Bitwise-OR-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2044.Count-Number-of-Maximum-Bitwise-OR-Subsets) (M) +[2044.Count-Number-of-Maximum-Bitwise-OR-Subsets](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2044.Count-Number-of-Maximum-Bitwise-OR-Subsets) (M) +[2742.Painting-the-Walls](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2742.Painting-the-Walls) (H) +[3538.Merge-Operations-for-Minimum-Travel-Time](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3538.Merge-Operations-for-Minimum-Travel-Time) (H) +* ``maximum subarray`` +[053.Maximum-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/053.Maximum-Subarray) (E+) +[152.Maximum-Product-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/152.Maximum-Product-Subarray) (M+) 
+[2272.Substring-With-Largest-Variance](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2272.Substring-With-Largest-Variance) (H-) +[2321.Maximum-Score-Of-Spliced-Array](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2321.Maximum-Score-Of-Spliced-Array) (H-) +* ``前缀和辅助`` +[3130.Find-All-Possible-Stable-Binary-Arrays-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3130.Find-All-Possible-Stable-Binary-Arrays-II) (H) +* ``遍历优化`` +[3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/3177.Find-the-Maximum-Length-of-a-Good-Subsequence-II) (H) #### [Bit Manipulation](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation) [137.Single-Number-II](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/137.Single-Number-II) (H-) @@ -757,9 +1009,18 @@ [371.Sum-of-Two-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/371.Sum-of-Two-Integers) (H) [318.Maximum-Product-of-Word-Lengths](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/318.Maximum-Product-of-Word-Lengths) (M+) 342.Power-of-Four (H) +[898.Bitwise-ORs-of-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/898.Bitwise-ORs-of-Subarrays) (H-) [957.Prison-Cells-After-N-Days](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/957.Prison-Cells-After-N-Days) (H) 1461.Check-If-a-String-Contains-All-Binary-Codes-of-Size-K (TBD) -[1521.Find-a-Value-of-a-Mysterious-Function-Closest-to-Target](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1521.Find-a-Value-of-a-Mysterious-Function-Closest-to-Target) (H-) +[2505.Bitwise-OR-of-All-Subsequence-Sums](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2505.Bitwise-OR-of-All-Subsequence-Sums) (H) +[2680.Maximum-OR](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2680.Maximum-OR) (M+) +[2802.Find-The-K-th-Lucky-Number](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2802.Find-The-K-th-Lucky-Number) (M+) +[2992.Number-of-Self-Divisible-Permutations](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2992.Number-of-Self-Divisible-Permutations) (M+) +[3133.Minimum-Array-End](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/3133.Minimum-Array-End) (M+) +* ``Prefix Hashing`` +[1521.Find-a-Value-of-a-Mysterious-Function-Closest-to-Target](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1521.Find-a-Value-of-a-Mysterious-Function-Closest-to-Target) (H-) +[3171.Find-Subarray-With-Bitwise-OR-Closest-to-K](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/3171.Find-Subarray-With-Bitwise-AND-Closest-to-K) (H) +[3209.Number-of-Subarrays-With-AND-Value-of-K](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/3209.Number-of-Subarrays-With-AND-Value-of-K) (M+) * ``XOR`` [136.Single-Number](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/136.Single-Number) (M) [268.Missing-Number](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/268.Missing-Number) (H-) @@ -769,7 +1030,9 @@ [1734.Decode-XORed-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1734.Decode-XORed-Permutation) (M+) [1738.Find-Kth-Largest-XOR-Coordinate-Value](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1738.Find-Kth-Largest-XOR-Coordinate-Value) (M+)
[1835.Find-XOR-Sum-of-All-Pairs-Bitwise-AND](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1835.Find-XOR-Sum-of-All-Pairs-Bitwise-AND) (M) +[2527.Find-Xor-Beauty-of-Array](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2527.Find-Xor-Beauty-of-Array) (H) * ``Bit Mask`` +[320.Generalized-Abbreviation](https://github.com/wisdompeak/LeetCode/tree/master/String/320.Generalized-Abbreviation) (M) [1239.Maximum-Length-of-a-Concatenated-String-with-Unique-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1239.Maximum-Length-of-a-Concatenated-String-with-Unique-Characters) (M+) [1284.Minimum-Number-of-Flips-to-Convert-Binary-Matrix-to-Zero-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1284.Minimum-Number-of-Flips-to-Convert-Binary-Matrix-to-Zero-Matrix) (M+) [1452.People-Whose-List-of-Favorite-Companies-Is-Not-a-Subset-of-Another-List](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1452.People-Whose-List-of-Favorite-Companies-Is-Not-a-Subset-of-Another-List) (H-) @@ -777,6 +1040,8 @@ [1774.Closest-Dessert-Cost](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1774.Closest-Dessert-Cost) (M) [2002.Maximum-Product-of-the-Length-of-Two-Palindromic-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2002.Maximum-Product-of-the-Length-of-Two-Palindromic-Subsequences) (M) [2151.Maximum-Good-People-Based-on-Statements](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2151.Maximum-Good-People-Based-on-Statements) (M+) +[2397.Maximum-Rows-Covered-by-Columns](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2397.Maximum-Rows-Covered-by-Columns) (M) +[3116.Kth-Smallest-Amount-With-Single-Denomination-Combination](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/3116.Kth-Smallest-Amount-With-Single-Denomination-Combination) (H) * ``Meet in the Middle`` [1755.Closest-Subsequence-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/1755.Closest-Subsequence-Sum) (H) [2035.Partition-Array-Into-Two-Arrays-to-Minimize-Sum-Difference](https://github.com/wisdompeak/LeetCode/tree/master/Bit_Manipulation/2035.Partition-Array-Into-Two-Arrays-to-Minimize-Sum-Difference) (H) @@ -786,6 +1051,7 @@ [327.Count-of-Range-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Divide_Conquer/327.Count-of-Range-Sum) (H-) [493.Reverse-Pairs](https://github.com/wisdompeak/LeetCode/tree/master/Divide_Conquer/493.Reverse-Pairs) (M+) [1649.Create-Sorted-Array-through-Instructions](https://github.com/wisdompeak/LeetCode/tree/master/Divide_Conquer/1649.Create-Sorted-Array-through-Instructions) (H) +[2426.Number-of-Pairs-Satisfying-Inequality](https://github.com/wisdompeak/LeetCode/tree/master/Divide_Conquer/2426.Number-of-Pairs-Satisfying-Inequality) (H-) #### [String](https://github.com/wisdompeak/LeetCode/tree/master/String) [006.ZigZag-Conversion](https://github.com/wisdompeak/LeetCode/tree/master/String/006.ZigZag-Conversion) (M+) @@ -794,8 +1060,6 @@ [388.Longest-Absolute-File-Path](https://github.com/wisdompeak/LeetCode/tree/master/String/388.Longest-Absolute-File-Path) (M+) [418.Sentence-Screen-Fitting](https://github.com/wisdompeak/LeetCode/tree/master/String/418.Sentence-Screen-Fitting) (M+) [423.Reconstruct-Original-Digits-from-English](https://github.com/wisdompeak/LeetCode/tree/master/Others/423.Reconstruct-Original-Digits-from-English) (H-) 
-[527.Word-Abbreviation](https://github.com/wisdompeak/LeetCode/tree/master/String/527.Word-Abbreviation) (M+) -[556.Next Greater Element III](https://github.com/wisdompeak/LeetCode/tree/master/String/556.Next-Greater-Element-III) (H-) 616.Add-Bold-Tag-in-String (M) [467.Unique-Substrings-in-Wraparound-String](https://github.com/wisdompeak/LeetCode/tree/master/String/467.Unique-Substrings-in-Wraparound-String) (H-) [564.Find-the-Closest-Palindrome](https://github.com/wisdompeak/LeetCode/tree/master/String/564.Find-the-Closest-Palindrome) (H) @@ -806,10 +1070,11 @@ [1616.Split-Two-Strings-to-Make-Palindrome](https://github.com/wisdompeak/LeetCode/tree/master/String/1616.Split-Two-Strings-to-Make-Palindrome) (M+) [1754.Largest-Merge-Of-Two-Strings](https://github.com/wisdompeak/LeetCode/tree/master/String/1754.Largest-Merge-Of-Two-Strings) (M+) [1849.Splitting-a-String-Into-Descending-Consecutive-Values](https://github.com/wisdompeak/LeetCode/tree/master/String/1849.Splitting-a-String-Into-Descending-Consecutive-Values) (M+) +[2468.Split-Message-Based-on-Limit](https://github.com/wisdompeak/LeetCode/tree/master/String/2468.Split-Message-Based-on-Limit) (H-) * ``Abbreviation`` -[320.Generalized-Abbreviation](https://github.com/wisdompeak/LeetCode/tree/master/String/320.Generalized-Abbreviation) (M) [408.Valid-Word-Abbreviation](https://github.com/wisdompeak/LeetCode/tree/master/String/408.Valid-Word-Abbreviation) (M) [411.Minimum-Unique-Word-Abbreviation](https://github.com/wisdompeak/LeetCode/tree/master/String/411.Minimum-Unique-Word-Abbreviation) (H) +[527.Word-Abbreviation](https://github.com/wisdompeak/LeetCode/tree/master/String/527.Word-Abbreviation) (M+) [2060.Check-if-an-Original-String-Exists-Given-Two-Encoded-Strings](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2060.Check-if-an-Original-String-Exists-Given-Two-Encoded-Strings) (H) * ``Rolling Hash`` [1044.Longest-Duplicate-Substring](https://github.com/wisdompeak/LeetCode/tree/master/String/1044.Longest-Duplicate-Substring) (H) @@ -819,6 +1084,10 @@ [1923.Longest-Common-Subpath](https://github.com/wisdompeak/LeetCode/tree/master/String/1923.Longest-Common-Subpath) (H) [2156.Find-Substring-With-Given-Hash-Value](https://github.com/wisdompeak/LeetCode/tree/master/String/2156.Find-Substring-With-Given-Hash-Value) (M) [2168.Unique-Substrings-With-Equal-Digit-Frequency](https://github.com/wisdompeak/LeetCode/tree/master/String/2168.Unique-Substrings-With-Equal-Digit-Frequency) (M+) +[2223.Sum-of-Scores-of-Built-Strings](https://github.com/wisdompeak/LeetCode/tree/master/String/2223.Sum-of-Scores-of-Built-Strings) (H-) +[2261.K-Divisible-Elements-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/String/2261.K-Divisible-Elements-Subarrays) (H-) +[2781.Length-of-the-Longest-Valid-Substring](https://github.com/wisdompeak/LeetCode/tree/master/String/2781.Length-of-the-Longest-Valid-Substring) (H-) +[3388.Count-Beautiful-Splits-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/String/3388.Count-Beautiful-Splits-in-an-Array) (H-) * ``KMP`` [1392.Longest-Happy-Prefix](https://github.com/wisdompeak/LeetCode/tree/master/String/1392.Longest-Happy-Prefix) (H) [028.Implement-strStr](https://github.com/wisdompeak/LeetCode/tree/master/String/028.Implement-strStr) (H) @@ -828,6 +1097,11 @@ [1367.Linked-List-in-Binary-Tree](https://github.com/wisdompeak/LeetCode/tree/master/String/1367.Linked-List-in-Binary-Tree) (H) 1397.Find All Good Strings (TBD) 
[1764.Form-Array-by-Concatenating-Subarrays-of-Another-Array](https://github.com/wisdompeak/LeetCode/tree/master/String/1764.Form-Array-by-Concatenating-Subarrays-of-Another-Array) (H) +[2301.Match-Substring-After-Replacement](https://github.com/wisdompeak/LeetCode/tree/master/String/2301.Match-Substring-After-Replacement) (H-) +[2851.String-Transformation](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/2851.String-Transformation) (H+) +[3008.Find-Beautiful-Indices-in-the-Given-Array-II](https://github.com/wisdompeak/LeetCode/tree/master/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II) (H-) +[3031.Minimum-Time-to-Revert-Word-to-Initial-State-II](https://github.com/wisdompeak/LeetCode/tree/master/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II) (H) +[3045.Count-Prefix-and-Suffix-Pairs-II](https://github.com/wisdompeak/LeetCode/tree/master/String/3045.Count-Prefix-and-Suffix-Pairs-II) (H) * ``Manacher`` [005.Longest-Palindromic-Substring](https://github.com/wisdompeak/LeetCode/tree/master/String/005.Longest-Palindromic-Substring) (H) [214.Shortest-Palindrome](https://github.com/wisdompeak/LeetCode/blob/master/String/214.Shortest-Palindrome) (H) @@ -854,35 +1128,41 @@ [1202.Smallest-String-With-Swaps](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1202.Smallest-String-With-Swaps) (M+) [1319.Number-of-Operations-to-Make-Network-Connected](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1319.Number-of-Operations-to-Make-Network-Connected) (M+) [1632.Rank-Transform-of-a-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1632.Rank-Transform-of-a-Matrix) (H) -[1631.Path-With-Minimum-Effort](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1631.Path-With-Minimum-Effort) (H-) -[1697.Checking-Existence-of-Edge-Length-Limited-Paths](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1697.Checking-Existence-of-Edge-Length-Limited-Paths) (H-) [1724.Checking-Existence-of-Edge-Length-Limited-Paths-II](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1724.Checking-Existence-of-Edge-Length-Limited-Paths-II) (H+) [1722.Minimize-Hamming-Distance-After-Swap-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1722.Minimize-Hamming-Distance-After-Swap-Operations) (M+) -[803.Bricks-Falling-When-Hit](https://github.com/wisdompeak/LeetCode/tree/master/DFS/803.Bricks-Falling-When-Hit) (H) -[1970.Last-Day-Where-You-Can-Still-Cross](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1970.Last-Day-Where-You-Can-Still-Cross) (H-) [2076.Process-Restricted-Friend-Requests](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2076.Process-Restricted-Friend-Requests) (H-) [2092.Find-All-People-With-Secret](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2092.Find-All-People-With-Secret) (H-) [2157.Groups-of-Strings](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2157.Groups-of-Strings) (H) +[2492.Minimum-Score-of-a-Path-Between-Two-Cities](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities) (M) +[2867.Count-Valid-Paths-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2867.Count-Valid-Paths-in-a-Tree) (M+) +* ``Union in an order`` +[803.Bricks-Falling-When-Hit](https://github.com/wisdompeak/LeetCode/tree/master/DFS/803.Bricks-Falling-When-Hit) (H) 
+[1970.Last-Day-Where-You-Can-Still-Cross](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1970.Last-Day-Where-You-Can-Still-Cross) (H-) +[1631.Path-With-Minimum-Effort](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1631.Path-With-Minimum-Effort) (H-) +[1697.Checking-Existence-of-Edge-Length-Limited-Paths](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1697.Checking-Existence-of-Edge-Length-Limited-Paths) (H-) +[2421.Number-of-Good-Paths](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2421.Number-of-Good-Paths) (H) * ``Prime Factors`` [952.Largest-Component-Size-by-Common-Factor](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/952.Largest-Component-Size-by-Common-Factor) (H) [1627.Graph-Connectivity-With-Threshold](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1627.Graph-Connectivity-With-Threshold) (M+) [1998.GCD-Sort-of-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1998.GCD-Sort-of-an-Array) (H-) +[2709.Greatest-Common-Divisor-Traversal](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/2709.Greatest-Common-Divisor-Traversal) (H-) * ``MST`` [1135.Connecting-Cities-With-Minimum-Cost](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1135.Connecting-Cities-With-Minimum-Cost) (M+) [1168.Optimize-Water-Distribution-in-a-Village](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1168.Optimize-Water-Distribution-in-a-Village) (H-) [1489.Find-Critical-and-Pseudo-Critical-Edges-in-Minimum-Spanning-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1489.Find-Critical-and-Pseudo-Critical-Edges-in-Minimum-Spanning-Tree) (H) [1579.Remove-Max-Number-of-Edges-to-Keep-Graph-Fully-Traversable](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1579.Remove-Max-Number-of-Edges-to-Keep-Graph-Fully-Traversable) (H-) -[1584.Min-Cost-to-Connect-All-Points](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1584.Min-Cost-to-Connect-All-Points) (H-) +[1584.Min-Cost-to-Connect-All-Points](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/1584.Min-Cost-to-Connect-All-Points) (H-) +[3600.Maximize-Spanning-Tree-Stability-with-Upgrades](https://github.com/wisdompeak/LeetCode/tree/master/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades) (H) #### [Recursion](https://github.com/wisdompeak/LeetCode/tree/master/Recursion) [087.Scramble-String](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/087.Scramble-String) (H-) [133.Clone-Graph](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/133.Clone-Graph) (M+) [213.House-Robber-II](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/213.House-Robber-II) (H-) [337.House-Robber-III](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/337.House-Robber-III) (M+) +[2378.Choose-Edges-to-Maximize-Score-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2378.Choose-Edges-to-Maximize-Score-in-a-Tree) (H-) [390.Elimination-Game](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/390.Elimination-Game) (H) [395.Longest-Substring-with-At-Least-K-Repeating-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/395.Longest-Substring-with-At-Least-K-Repeating-Characters) (H) [397.Integer-Replacement](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/397.Integer-Replacement) (M+) -440.K-th-Smallest-in-Lexicographical-Order (H) 
[761.Special-Binary-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/761.Special-Binary-String) (H) 779.K-th-Symbol-in-Grammar (M) [780.Reaching-Points](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/780.Reaching-Points) (H-) @@ -894,10 +1174,9 @@ [1088.Confusing-Number-II](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1088.Confusing-Number-II) (H) [1199.Minimum-Time-to-Build-Blocks](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1199.Minimum-Time-to-Build-Blocks) (H+) [1274.Number-of-Ships-in-a-Rectangle](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1274.Number-of-Ships-in-a-Rectangle) (M) -[1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n) (H-) -1545. Find Kth Bit in Nth Binary String (TBD) [1553.Minimum-Number-of-Days-to-Eat-N-Oranges](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1553.Minimum-Number-of-Days-to-Eat-N-Oranges) (H) [1611.Minimum-One-Bit-Operations-to-Make-Integers-Zero](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1611.Minimum-One-Bit-Operations-to-Make-Integers-Zero) (H) +[2998.Minimum-Number-of-Operations-to-Make-X-and-Y-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2998.Minimum-Number-of-Operations-to-Make-X-and-Y-Equal) (M+) * ``Evaluate Expressions`` [241.Different-Ways-to-Add-Parentheses](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/241.Different-Ways-to-Add-Parentheses) (M+) [2019.The-Score-of-Students-Solving-Math-Expression](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2019.The-Score-of-Students-Solving-Math-Expression) (H-) @@ -910,6 +1189,19 @@ [1510.Stone-Game-IV](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1510.Stone-Game-IV) (M) [1563.Stone-Game-V](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1563.Stone-Game-V) (H-) [2029.Stone-Game-IX](https://github.com/wisdompeak/LeetCode/tree/master/Others/2029.Stone-Game-IX) (H) +* ``Digit counting & finding`` +[440.K-th-Smallest-in-Lexicographical-Order](https://github.com/wisdompeak/LeetCode/tree/master/Others/440.K-th-Smallest-in-Lexicographical-Order) (H-) +[1012.Numbers-With-Repeated-Digits](https://github.com/wisdompeak/LeetCode/tree/master/Math/1012.Numbers-With-Repeated-Digits) (H-) +[1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n) (H-) +[1545.Find-Kth-Bit-in-Nth-Binary-String](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/1545.Find-Kth-Bit-in-Nth-Binary-String) (M+) +[2376.Count-Special-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Others/2376.Count-Special-Integers) (M+) +[2719.Count-of-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2719.Count-of-Integers) (H) +[2801.Count-Stepping-Numbers-in-Range](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2801.Count-Stepping-Numbers-in-Range) (H) +[2827.Number-of-Beautiful-Integers-in-the-Range](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2827.Number-of-Beautiful-Integers-in-the-Range) (H) +[2999.Count-the-Number-of-Powerful-Integers](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/2999.Count-the-Number-of-Powerful-Integers) (H-) 
+[3307.Find-the-K-th-Character-in-String-Game-II](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/3307.Find-the-K-th-Character-in-String-Game-II) (M) +[3490.Count-Beautiful-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/3490.Count-Beautiful-Numbers) (M+) +[3614.Process-String-with-Special-Operations-II](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/3614.Process-String-with-Special-Operations-II) (H-) #### [Graph](https://github.com/wisdompeak/LeetCode/tree/master/Graph/) [332.Reconstruct-Itinerary](https://github.com/wisdompeak/LeetCode/tree/master/DFS/332.Reconstruct-Itinerary) (H) @@ -917,23 +1209,42 @@ [753.Cracking-the-Safe](https://github.com/wisdompeak/LeetCode/tree/master/Hash/753.Cracking-the-Safe) (H) [1059.All-Paths-from-Source-Lead-to-Destination](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1059.All-Paths-from-Source-Lead-to-Destination) (H) [1192.Critical-Connections-in-a-Network](https://github.com/wisdompeak/LeetCode/tree/master/DFS/1192.Critical-Connections-in-a-Network) (H) -1334.Find-the-City-With-the-Smallest-Number-of-Neighbors-at-a-Threshold-Distance (TBD) 1361.Validate-Binary-Tree-Nodes (TBD) [1719.Number-Of-Ways-To-Reconstruct-A-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1719.Number-Of-Ways-To-Reconstruct-A-Tree) (H+) [1761.Minimum-Degree-of-a-Connected-Trio-in-a-Graph](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1761.Minimum-Degree-of-a-Connected-Trio-in-a-Graph) (M+) [1782.Count-Pairs-Of-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1782.Count-Pairs-Of-Nodes) (H) +[2360.Longest-Cycle-in-a-Graph](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2360.Longest-Cycle-in-a-Graph) (M+) +[2508.Add-Edges-to-Make-Degrees-of-All-Nodes-Even](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2508.Add-Edges-to-Make-Degrees-of-All-Nodes-Even) (H-) +[2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2556.Disconnect-Path-in-a-Binary-Matrix-by-at-Most-One-Flip) (H) +[2603.Collect-Coins-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2603.Collect-Coins-in-a-Tree) (H-) +[2608.Shortest-Cycle-in-a-Graph](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2608.Shortest-Cycle-in-a-Graph) (M+) +[2791.Count-Paths-That-Can-Form-a-Palindrome-in-a-Tree](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2791.Count-Paths-That-Can-Form-a-Palindrome-in-a-Tree) (H) +[2876.Count-Visited-Nodes-in-a-Directed-Graph](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2876.Count-Visited-Nodes-in-a-Directed-Graph) (M+) +[3017.Count-the-Number-of-Houses-at-a-Certain-Distance-II](https://github.com/wisdompeak/LeetCode/tree/master/Graph/3017.Count-the-Number-of-Houses-at-a-Certain-Distance-II) (H) +* ``Dijkstra`` +[787.Cheapest-Flights-Within-K-Stops](https://github.com/wisdompeak/LeetCode/tree/master/Graph/787.Cheapest-Flights-Within-K-Stops) (H) +[2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2577.Minimum-Time-to-Visit-a-Cell-In-a-Grid) (H-) +[2662.Minimum-Cost-of-a-Path-With-Special-Roads](https://github.com/wisdompeak/LeetCode/tree/master/BFS/2662.Minimum-Cost-of-a-Path-With-Special-Roads) (H-) +[2699.Modify-Graph-Edge-Weights](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2699.Modify-Graph-Edge-Weights) (H) 
+[3112.Minimum-Time-to-Visit-Disappearing-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Graph/3112.Minimum-Time-to-Visit-Disappearing-Nodes) (M) +[3123.Find-Edges-in-Shortest-Paths](https://github.com/wisdompeak/LeetCode/tree/master/Graph/3123.Find-Edges-in-Shortest-Paths) (H-) +* ``Floyd`` +[1334.Find-the-City-With-the-Smallest-Number-of-Neighbors-at-a-Threshold-Distance](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1334.Find-the-City-With-the-Smallest-Number-of-Neighbors-at-a-Threshold-Distance) (M) +[2642.Design-Graph-With-Shortest-Path-Calculator](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2642.Design-Graph-With-Shortest-Path-Calculator) (M+) +[2959.Number-of-Possible-Sets-of-Closing-Branches](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2959.Number-of-Possible-Sets-of-Closing-Branches) (M+) +[2976.Minimum-Cost-to-Convert-String-I](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2976.Minimum-Cost-to-Convert-String-I) (M+) +[3387.Maximize-Amount-After-Two-Days-of-Conversions](https://github.com/wisdompeak/LeetCode/tree/master/Graph/3387.Maximize-Amount-After-Two-Days-of-Conversions) (H-) * ``Hungarian Algorithm`` [1820.Maximum-Number-of-Accepted-Invitations](https://github.com/wisdompeak/LeetCode/tree/master/Graph/1820.Maximum-Number-of-Accepted-Invitations) (H) [2123.Minimum-Operations-to-Remove-Adjacent-Ones-in-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Graph/2123.Minimum-Operations-to-Remove-Adjacent-Ones-in-Matrix) (H) #### [Math](https://github.com/wisdompeak/LeetCode/tree/master/Math) [089.Gray-Code](https://github.com/wisdompeak/LeetCode/tree/master/Math/089.Gray-Code) (M+) (aka. 1238. Circular Permutation in Binary Representation) -[233.Number-of-Digit-One](https://github.com/wisdompeak/LeetCode/tree/master/Math/233.Number-of-Digit-One) (H-) 458.Poor-Pigs (H) [400.n-th-digit](https://github.com/wisdompeak/LeetCode/tree/master/Math/400.n-th-digit) (M) [441.Arranging-Coins](https://github.com/wisdompeak/LeetCode/tree/master/Math/441.Arranging-Coins) (M-) [628.Maximum-Product-of-Three-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Math/628.Maximum-Product-of-Three-Numbers) (M) -[672.Bulb-Switcher-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/672.Bulb-Switcher-II) (H) +[672.Bulb-Switcher-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/672.Bulb-Switcher-II) (H) [754.Reach-a-Number](https://github.com/wisdompeak/LeetCode/tree/master/Math/754.Reach-a-Number) (H) [829.Consecutive-Numbers-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Math/829.Consecutive-Numbers-Sum) (M) [878.Nth-Magical-Number](https://github.com/wisdompeak/LeetCode/tree/master/Math/878.Nth-Magical-Number) (M+) @@ -943,7 +1254,6 @@ [963.Minimum-Area-Rectangle-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/963.Minimum-Area-Rectangle-II) (H-) [964.Least-Operators-to-Express-Number](https://github.com/wisdompeak/LeetCode/tree/master/Recursion/964.Least-Operators-to-Express-Number) (H) [972.Equal-Rational-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Math/972.Equal-Rational-Numbers) (H) -[1012.Numbers-With-Repeated-Digits](https://github.com/wisdompeak/LeetCode/tree/master/Math/1012.Numbers-With-Repeated-Digits) (H-) [1017.Convert-to-Base--2](https://github.com/wisdompeak/LeetCode/tree/master/Math/1017.Convert-to-Base--2) (M+) [1073.Adding-Two-Negabinary-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Math/1073.Adding-Two-Negabinary-Numbers) (H-) 
[1025.Divisor-Game](https://github.com/wisdompeak/LeetCode/tree/master/Math/1025.Divisor-Game) (M) @@ -955,17 +1265,27 @@ [1680.Concatenation-of-Consecutive-Binary-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Math/1680.Concatenation-of-Consecutive-Binary-Numbers) (M) [1739.Building-Boxes](https://github.com/wisdompeak/LeetCode/tree/master/Math/1739.Building-Boxes) (H-) [1806.Minimum-Number-of-Operations-to-Reinitialize-a-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Math/1806.Minimum-Number-of-Operations-to-Reinitialize-a-Permutation) (H) +[1922.Count-Good-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1922.Count-Good-Numbers) (M) [1969.Minimum-Non-Zero-Product-of-the-Array-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Math/1969.Minimum-Non-Zero-Product-of-the-Array-Elements) (M+) [2128.Remove-All-Ones-With-Row-and-Column-Flips](https://github.com/wisdompeak/LeetCode/tree/master/Math/2128.Remove-All-Ones-With-Row-and-Column-Flips) (M+) -[2183.Count-Array-Pairs-Divisible-by-K](https://github.com/wisdompeak/LeetCode/tree/master/Math/2183.Count-Array-Pairs-Divisible-by-K) (M+) +[2217.Find-Palindrome-With-Fixed-Length](https://github.com/wisdompeak/LeetCode/tree/master/Math/2217.Find-Palindrome-With-Fixed-Length) (M+) * ``Distances`` -[296.Best-Meeting-Point](https://github.com/wisdompeak/LeetCode/tree/master/Math/296.Best-Meeting-Point) (M+) -[2033.Minimum-Operations-to-Make-a-Uni-Value-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Math/2033.Minimum-Operations-to-Make-a-Uni-Value-Grid) (M+) -[1703.Minimum-Adjacent-Swaps-for-K-Consecutive-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Math/1703.Minimum-Adjacent-Swaps-for-K-Consecutive-Ones) (H) [1478.Allocate-Mailboxes](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1478.Allocate-Mailboxes) (H) -[1131.Maximum-of-Absolute-Value-Expression](https://github.com/wisdompeak/LeetCode/tree/master/Math/1131.Maximum-of-Absolute-Value-Expression) (H) +[1131.Maximum-of-Absolute-Value-Expression](https://github.com/wisdompeak/LeetCode/tree/master/Math/1131.Maximum-of-Absolute-Value-Expression) (H) +[3102.Minimize-Manhattan-Distances](https://github.com/wisdompeak/LeetCode/tree/master/Math/3102.Minimize-Manhattan-Distances) (H) 1515.Best Position for a Service Centre (TBD) [1956.Minimum-Time-For-K-Virus-Variants-to-Spread](https://github.com/wisdompeak/LeetCode/tree/master/Math/1956.Minimum-Time-For-K-Virus-Variants-to-Spread) (H+) +* ``Median Theorem`` +[296.Best-Meeting-Point](https://github.com/wisdompeak/LeetCode/tree/master/Math/296.Best-Meeting-Point) (M+) +[462.Minimum-Moves-to-Equal-Array-Elements-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/462.Minimum-Moves-to-Equal-Array-Elements-II) (M-) +[1703.Minimum-Adjacent-Swaps-for-K-Consecutive-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Math/1703.Minimum-Adjacent-Swaps-for-K-Consecutive-Ones) (H) +[2033.Minimum-Operations-to-Make-a-Uni-Value-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Math/2033.Minimum-Operations-to-Make-a-Uni-Value-Grid) (M+) +[2448.Minimum-Cost-to-Make-Array-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Math/2448.Minimum-Cost-to-Make-Array-Equal) (H-) +[2607.Make-K-Subarray-Sums-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Math/2607.Make-K-Subarray-Sums-Equal) (M+) 
+[1838.Frequency-of-the-Most-Frequent-Element](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element) (H-) +[2967.Minimum-Cost-to-Make-Array-Equalindromic](https://github.com/wisdompeak/LeetCode/tree/master/Math/2967.Minimum-Cost-to-Make-Array-Equalindromic) (H-) +[2968.Apply-Operations-to-Maximize-Frequency-Score](https://github.com/wisdompeak/LeetCode/tree/master/Math/2968.Apply-Operations-to-Maximize-Frequency-Score) (H-) +[3086.Minimum-Moves-to-Pick-K-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Math/3086.Minimum-Moves-to-Pick-K-Ones) (H) * ``Geometry`` [223.Rectangle-Area](https://github.com/wisdompeak/LeetCode/tree/master/Math/223.Rectangle-Area) (M+) [335.Self-Crossing](https://github.com/wisdompeak/LeetCode/tree/master/Math/335.Self-Crossing) (H) @@ -976,6 +1296,8 @@ [1401.Circle-and-Rectangle-Overlapping](https://github.com/wisdompeak/LeetCode/tree/master/Math/1401.Circle-and-Rectangle-Overlapping) (H) [1453.Maximum-Number-of-Darts-Inside-of-a-Circular-Dartboard](https://github.com/wisdompeak/LeetCode/tree/master/Math/1453.Maximum-Number-of-Darts-Inside-of-a-Circular-Dartboard) (H) [1610.Maximum-Number-of-Visible-Points](https://github.com/wisdompeak/LeetCode/tree/master/Math/1610.Maximum-Number-of-Visible-Points) (H) +[2280.Minimum-Lines-to-Represent-a-Line-Chart](https://github.com/wisdompeak/LeetCode/tree/master/Math/2280.Minimum-Lines-to-Represent-a-Line-Chart) (M) +[3197.Find-the-Minimum-Area-to-Cover-All-Ones-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/3197.Find-the-Minimum-Area-to-Cover-All-Ones-II) (H-) * ``Random Pick`` [382.Linked-List-Random-Node](https://github.com/wisdompeak/LeetCode/tree/master/Math/382.Linked-List-Random-Node) (H) [470.Implement-Rand10()-Using-Rand7()](https://github.com/wisdompeak/LeetCode/tree/master/Math/470.Implement-Rand10--Using-Rand7) (M+) @@ -999,26 +1321,39 @@ [1830.Minimum-Number-of-Operations-to-Make-String-Sorted](https://github.com/wisdompeak/LeetCode/tree/master/Math/1830.Minimum-Number-of-Operations-to-Make-String-Sorted) (H) [1866.Number-of-Ways-to-Rearrange-Sticks-With-K-Sticks-Visible](https://github.com/wisdompeak/LeetCode/tree/master/Math/1866.Number-of-Ways-to-Rearrange-Sticks-With-K-Sticks-Visible) (H) [1916.Count-Ways-to-Build-Rooms-in-an-Ant-Colony](https://github.com/wisdompeak/LeetCode/tree/master/Math/1916.Count-Ways-to-Build-Rooms-in-an-Ant-Colony) (H) +[2221.Find-Triangular-Sum-of-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Math/2221.Find-Triangular-Sum-of-an-Array) (M) +[2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps](https://github.com/wisdompeak/LeetCode/tree/master/Math/2400.Number-of-Ways-to-Reach-a-Position-After-Exactly-k-Steps) (M+) +[2514.Count-Anagrams](https://github.com/wisdompeak/LeetCode/tree/master/Math/2514.Count-Anagrams) (H-) +[2539.Count-the-Number-of-Good-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Math/2539.Count-the-Number-of-Good-Subsequences) (H-) +[2930.Number-of-Strings-Which-Can-Be-Rearranged-to-Contain-Substring](https://github.com/wisdompeak/LeetCode/tree/master/Math/2930.Number-of-Strings-Which-Can-Be-Rearranged-to-Contain-Substring) (H-) +[2954.Count-the-Number-of-Infection-Sequences](https://github.com/wisdompeak/LeetCode/tree/master/Math/2954.Count-the-Number-of-Infection-Sequences) (H) +[3395.Subsequences-with-a-Unique-Middle-Mode-I](https://github.com/wisdompeak/LeetCode/tree/master/Math/3395.Subsequences-with-a-Unique-Middle-Mode-I) (H) 
+[3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Math/3405.Count-the-Number-of-Arrays-with-K-Matching-Adjacent-Elements) (H-) +[3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Math/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences) (M+) +[3463.Check-If-Digits-Are-Equal-in-String-After-Operations-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/3463.Check-If-Digits-Are-Equal-in-String-After-Operations-II) (H+) +[3518.Smallest-Palindromic-Rearrangement-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/3518.Smallest-Palindromic-Rearrangement-II) (H) * ``Numerical Theory`` +[204.Count-Primes](https://github.com/wisdompeak/LeetCode/tree/master/Math/204.Count-Primes) (M) [343.Integer-Break](https://github.com/wisdompeak/LeetCode/tree/master/Math/343.Integer-Break) (H-) [365.Water-and-Jug-Problem](https://github.com/wisdompeak/LeetCode/tree/master/Math/365.Water-and-Jug-Problem) (H) [1808.Maximize-Number-of-Nice-Divisors](https://github.com/wisdompeak/LeetCode/tree/master/Math/1808.Maximize-Number-of-Nice-Divisors) (H-) - +[1819.Number-of-Different-Subsequences-GCDs](https://github.com/wisdompeak/LeetCode/tree/master/Math/1819.Number-of-Different-Subsequences-GCDs) (H-) +[2183.Count-Array-Pairs-Divisible-by-K](https://github.com/wisdompeak/LeetCode/tree/master/Math/2183.Count-Array-Pairs-Divisible-by-K) (M+) +[2344.Minimum-Deletions-to-Make-Array-Divisible](https://github.com/wisdompeak/LeetCode/tree/master/Math/2344.Minimum-Deletions-to-Make-Array-Divisible) (E) +[2543.Check-if-Point-Is-Reachable](https://github.com/wisdompeak/LeetCode/tree/master/Math/2543.Check-if-Point-Is-Reachable) (H) +[2654.Minimum-Number-of-Operations-to-Make-All-Array-Elements-Equal-to-1](https://github.com/wisdompeak/LeetCode/tree/master/Math/2654.Minimum-Number-of-Operations-to-Make-All-Array-Elements-Equal-to-1) (M) +[3164.Find-the-Number-of-Good-Pairs-II](https://github.com/wisdompeak/LeetCode/tree/master/Math/3164.Find-the-Number-of-Good-Pairs-II) (M+) #### [Greedy](https://github.com/wisdompeak/LeetCode/tree/master/Greedy) [055.Jump-Game](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/055.Jump-Game) (E+) [045.Jump-Game-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/045.Jump-Game-II) (M) [134.Gas-Station](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/134.Gas-Station) (H) -[221.Maximal-Square](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/221.Maximal-Square) (H) -[229.Majority-Element-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/229.Majority-Element-II) (H) [659.Split-Array-into-Consecutive-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/659.Split-Array-into-Consecutive-Subsequences) (H) -[484.Find-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/484.Find-Permutation) (H) [386.Lexicographical-Numbers](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/386.Lexicographical-Numbers) (H) 624.Maximum-Distance-in-Arrays (M) [665.Non-decreasing-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/665.Non-decreasing-Array) (H) 670.Maximum-Swap (M+) 649.Dota2-Senate (H) -[330.Patching-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/330.Patching-Array) (H) [683.K-Empty-Slots](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/683.K-Empty-Slots) (H) 
[517.Super-Washing-Machines](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/517.Super-Washing-Machines) (H) 870.Advantage-Shuffle (M) @@ -1036,9 +1371,7 @@ [1253.Reconstruct-a-2-Row-Binary-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1253.Reconstruct-a-2-Row-Binary-Matrix) (M) [1354.Construct-Target-Array-With-Multiple-Sums](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1354.Construct-Target-Array-With-Multiple-Sums) (H-) [1414.Find-the-Minimum-Number-of-Fibonacci-Numbers-Whose-Sum-Is-K](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1414.Find-the-Minimum-Number-of-Fibonacci-Numbers-Whose-Sum-Is-K) (M+) -[1488.Avoid-Flood-in-The-City](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1488.Avoid-Flood-in-The-City) (H-) [1505.Minimum-Possible-Integer-After-at-Most-K-Adjacent-Swaps-On-Digits](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1505.Minimum-Possible-Integer-After-at-Most-K-Adjacent-Swaps-On-Digits) (H) -[1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array) (H-) [1535.Find-the-Winner-of-an-Array-Game](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1535.Find-the-Winner-of-an-Array-Game) (M+) [1536.Minimum-Swaps-to-Arrange-a-Binary-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1536.Minimum-Swaps-to-Arrange-a-Binary-Grid) (H-) [1540.Can-Convert-String-in-K-Moves](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1540.Can-Convert-String-in-K-Moves) (M+) @@ -1060,6 +1393,47 @@ [2171.Removing-Minimum-Number-of-Magic-Beans](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2171.Removing-Minimum-Number-of-Magic-Beans) (M) [2182.Construct-String-With-Repeat-Limit](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2182.Construct-String-With-Repeat-Limit) (M+) [2193.Minimum-Number-of-Moves-to-Make-Palindrome](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2193.Minimum-Number-of-Moves-to-Make-Palindrome) (H+) +[2216.Minimum-Deletions-to-Make-Array-Beautiful](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2216.Minimum-Deletions-to-Make-Array-Beautiful) (M+) +[2242.Maximum-Score-of-a-Node-Sequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2242.Maximum-Score-of-a-Node-Sequence) (M+) +[2257.Count-Unguarded-Cells-in-the-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2257.Count-Unguarded-Cells-in-the-Grid) (M+) +[2275.Largest-Combination-With-Bitwise-AND-Greater-Than-Zero](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2275.Largest-Combination-With-Bitwise-AND-Greater-Than-Zero) (M+) +[2306.Naming-a-Company](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2306.Naming-a-Company) (H-) +[2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2311.Longest-Binary-Subsequence-Less-Than-or-Equal-to-K) (H-) +[2332.The-Latest-Time-to-Catch-a-Bus](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2332.The-Latest-Time-to-Catch-a-Bus) (H-) +[2350.Shortest-Impossible-Sequence-of-Rolls](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2350.Shortest-Impossible-Sequence-of-Rolls) (M+) +[2365.Task-Scheduler-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2365.Task-Scheduler-II) (M) 
+[2366.Minimum-Replacements-to-Sort-the-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2366.Minimum-Replacements-to-Sort-the-Array) (H-) +[2371.Minimize-Maximum-Value-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2371.Minimize-Maximum-Value-in-a-Grid) (M+) +[2449.Minimum-Number-of-Operations-to-Make-Arrays-Similar](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2449.Minimum-Number-of-Operations-to-Make-Arrays-Similar) (M+) +[2457.Minimum-Addition-to-Make-Integer-Beautiful](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2457.Minimum-Addition-to-Make-Integer-Beautiful) (M) +[2546.Apply-Bitwise-Operations-to-Make-Strings-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2546.Apply-Bitwise-Operations-to-Make-Strings-Equal) (M+) +[2551.Put-Marbles-in-Bags](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2551.Put-Marbles-in-Bags) (M+) +[2561.Rearranging-Fruits](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2561.Rearranging-Fruits) (H-) +[2598.Smallest-Missing-Non-negative-Integer-After-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2598.Smallest-Missing-Non-negative-Integer-After-Operations) (M) +[2813.Maximum-Elegance-of-a-K-Length-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2813.Maximum-Elegance-of-a-K-Length-Subsequence) (H-) +[2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2835.Minimum-Operations-to-Form-Subsequence-With-Target-Sum) (M+) +[2871.Split-Array-Into-Maximum-Number-of-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2871.Split-Array-Into-Maximum-Number-of-Subarrays) (M+) +[2868.The-Wording-Game](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2868.The-Wording-Game) (M) +[2897.Apply-Operations-on-Array-to-Maximize-Sum-of-Squares](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2897.Apply-Operations-on-Array-to-Maximize-Sum-of-Squares) (M+) +[3022.Minimize-OR-of-Remaining-Elements-Using-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3022.Minimize-OR-of-Remaining-Elements-Using-Operations) (H) +[3219.Minimum-Cost-for-Cutting-Cake-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3219.Minimum-Cost-for-Cutting-Cake-II) (H) +[3635.Earliest-Finish-Time-for-Land-and-Water-Rides-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3635.Earliest-Finish-Time-for-Land-and-Water-Rides-II) (H-) +* ``Boyer-Moore Majority Voting`` +[229.Majority-Element-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/229.Majority-Element-II) (H) +[2856.Minimum-Array-Length-After-Pair-Removals](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2856.Minimum-Array-Length-After-Pair-Removals) (M) +[3139.Minimum-Cost-to-Equalize-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3139.Minimum-Cost-to-Equalize-Array) (H) +* ``Lexicographical Sequence`` +[031.Next-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/031.Next-Permutation) (M) +[556.Next-Greater-Element-III](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/556.Next-Greater-Element-III) (M) +[2663.Lexicographically-Smallest-Beautiful-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2663.Lexicographically-Smallest-Beautiful-String) (H-) +* ``DI Sequence`` +[942.DI-String-Match](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/942.DI-String-Match) (M) 
+[484.Find-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/484.Find-Permutation) (M) +[2375.Construct-Smallest-Number-From-DI-String](https://github.com/wisdompeak/LeetCode/blob/master/Greedy/2375.Construct-Smallest-Number-From-DI-String) (M) +* ``Smear Top Elements`` +[2233.Maximum-Product-After-K-Increments](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2233.Maximum-Product-After-K-Increments) (M+) +[2333.Minimum-Sum-of-Squared-Difference](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2333.Minimum-Sum-of-Squared-Difference) (M+) +[2234.Maximum-Total-Beauty-of-the-Gardens](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2234.Maximum-Total-Beauty-of-the-Gardens) (H-) * ``LIS`` [300.Longest-Increasing-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/300.Longest-Increasing-Subsequence) (M+) [354.Russian-Doll-Envelopes](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/354.Russian-Doll-Envelopes) (H-) @@ -1073,6 +1447,7 @@ * ``Three-pass`` [042.Trapping-Rain-Water](https://github.com/wisdompeak/LeetCode/tree/master/Others/042.Trapping-Rain-Water) (H-) [334.Increasing-Triplet-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/334.Increasing-Triplet-Subsequence) (H-) +[689.Maximum-Sum-of-3-Non-Overlapping-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/689.Maximum-Sum-of-3-Non-Overlapping-Subarrays) (M+) [907.Sum-of-Subarray-Minimums](https://github.com/wisdompeak/LeetCode/tree/master/Stack/907.Sum-of-Subarray-Minimums) (H) [1525.Number-of-Good-Ways-to-Split-a-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1525.Number-of-Good-Ways-to-Split-a-String) (M) [1638.Count-Substrings-That-Differ-by-One-Character](https://github.com/wisdompeak/LeetCode/tree/master/Dynamic_Programming/1638.Count-Substrings-That-Differ-by-One-Character) (M+) @@ -1083,12 +1458,15 @@ [1888.Minimum-Number-of-Flips-to-Make-the-Binary-String-Alternating](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1888.Minimum-Number-of-Flips-to-Make-the-Binary-String-Alternating) (M+) [2163.Minimum-Difference-in-Sums-After-Removal-of-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2163.Minimum-Difference-in-Sums-After-Removal-of-Elements) (M+) [2167.Minimum-Time-to-Remove-All-Cars-Containing-Illegal-Goods](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2167.Minimum-Time-to-Remove-All-Cars-Containing-Illegal-Goods) (H-) +[2555.Maximize-Win-From-Two-Segments](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2555.Maximize-Win-From-Two-Segments) (M+) +[2565.Subsequence-With-the-Minimum-Score](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2565.Subsequence-With-the-Minimum-Score) (H-) * ``State Machine`` [524.Longest-Word-in-Dictionary-through-Deleting](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/524.Longest-Word-in-Dictionary-through-Deleting) (M+) [727.Minimum-Window-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/727.Minimum-Window-Subsequence) (H-) [792.Number-of-Matching-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/792.Number-of-Matching-Subsequences) (H-) [1055.Shortest-Way-to-Form-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1055.Shortest-Way-to-Form-String) (M+) [2055.Plates-Between-Candles](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2055.Plates-Between-Candles) (M+) 
+[2370.Longest-Ideal-Subsequence](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2370.Longest-Ideal-Subsequence) (M) * ``Sort`` 164.Maximum-Gap (H) [179.Largest-Number](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/179.Largest-Number) (H-) @@ -1099,11 +1477,15 @@ 826.Most-Profit-Assigning-Work (M) [1268.Search-Suggestions-System](https://github.com/wisdompeak/LeetCode/tree/master/Trie/1268.Search-Suggestions-System) (H-) [1402.Reducing-Dishes](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1402.Reducing-Dishes) (M) -[1520.Maximum-Number-of-Non-Overlapping-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1520.Maximum-Number-of-Non-Overlapping-Substrings) (H-) [1564.Put-Boxes-Into-the-Warehouse-I](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1564.Put-Boxes-Into-the-Warehouse-I) (M+) [1665.Minimum-Initial-Energy-to-Finish-Tasks](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1665.Minimum-Initial-Energy-to-Finish-Tasks) (H-) [1686.Stone-Game-VI](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1686.Stone-Game-VI) (H-) [1996.The-Number-of-Weak-Characters-in-the-Game](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1996.The-Number-of-Weak-Characters-in-the-Game) (M+) +[2250.Count-Number-of-Rectangles-Containing-Each-Point](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2250.Count-Number-of-Rectangles-Containing-Each-Point) (H-) +[2343.Query-Kth-Smallest-Trimmed-Number](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2343.Query-Kth-Smallest-Trimmed-Number) (H-) +[2412.Minimum-Money-Required-Before-Transactions](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2412.Minimum-Money-Required-Before-Transactions) (H-) +[2345.Finding-the-Number-of-Visible-Mountains](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2345.Finding-the-Number-of-Visible-Mountains) (H-) +[3027.Find-the-Number-of-Ways-to-Place-People-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3027.Find-the-Number-of-Ways-to-Place-People-II) (M) * ``Indexing Sort`` [041.First-Missing-Positive](https://github.com/wisdompeak/LeetCode/blob/master/Greedy/041.First-Missing-Positive/Readme.md) (H) [268.Missing-Number](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/268.Missing-Number) (H-) @@ -1111,6 +1493,9 @@ [442.Find-All-Duplicates-in-an-Array](https://github.com/wisdompeak/LeetCode/blob/master/Greedy/442.Find-All-Duplicates-in-an-Array) (M) [448.Find-All-Numbers-Disappeared-in-an-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/448.Find-All-Numbers-Disappeared-in-an-Array) (M) [645.Set-Mismatch](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/645.Set-Mismatch) (M) +[2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2471.Minimum-Number-of-Operations-to-Sort-a-Binary-Tree-by-Level) (M+) +[2459.Sort-Array-by-Moving-Items-to-Empty-Space](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2459.Sort-Array-by-Moving-Items-to-Empty-Space) (H) +[3551.Minimum-Swaps-to-Sort-by-Digit-Sum](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3551.Minimum-Swaps-to-Sort-by-Digit-Sum) (M) * ``Parenthesis`` [032.Longest-Valid-Parentheses](https://github.com/wisdompeak/LeetCode/tree/master/Stack/032.Longest-Valid-Parentheses) (H) [921.Minimum-Add-to-Make-Parentheses-Valid](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/921.Minimum-Add-to-Make-Parentheses-Valid) (M+) @@ -1127,10 
+1512,17 @@ [1272.Remove-Interval](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1272.Remove-Interval) (M+) [1288.Remove-Covered-Intervals](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1288.Remove-Covered-Intervals) (M+) [1326.Minimum-Number-of-Taps-to-Open-to-Water-a-Garden](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1326.Minimum-Number-of-Taps-to-Open-to-Water-a-Garden) (M+) -[1235.Maximum-Profit-in-Job-Scheduling](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1235.Maximum-Profit-in-Job-Scheduling) (H-) -[1751.Maximum-Number-of-Events-That-Can-Be-Attended-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1751.Maximum-Number-of-Events-That-Can-Be-Attended-II) (H) -[2008.Maximum-Earnings-From-Taxi](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2008.Maximum-Earnings-From-Taxi) (M+) [2054.Two-Best-Non-Overlapping-Events](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2054.Two-Best-Non-Overlapping-Events) (H-) +[2580.Count-Ways-to-Group-Overlapping-Ranges](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2580.Count-Ways-to-Group-Overlapping-Ranges) (M) +[2589.Minimum-Time-to-Complete-All-Tasks](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2589.Minimum-Time-to-Complete-All-Tasks) (H) +[2983.Palindrome-Rearrangement-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2983.Palindrome-Rearrangement-Queries) (H+) +[2781.Length-of-the-Longest-Valid-Substring](https://github.com/wisdompeak/LeetCode/tree/master/String/2781.Length-of-the-Longest-Valid-Substring) (H-) +[3394.Check-if-Grid-can-be-Cut-into-Sections](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3394.Check-if-Grid-can-be-Cut-into-Sections) (M) +[2271.Maximum-White-Tiles-Covered-by-a-Carpet](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2271.Maximum-White-Tiles-Covered-by-a-Carpet) (M+) +[3413.Maximum-Coins-From-K-Consecutive-Bags](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3413.Maximum-Coins-From-K-Consecutive-Bags) (H-) +3104.Find Longest Self-Contained Substring (TBD) +[1520.Maximum-Number-of-Non-Overlapping-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1520.Maximum-Number-of-Non-Overlapping-Substrings) (H-) +[3458.Select-K-Disjoint-Special-Substrings](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3458.Select-K-Disjoint-Special-Substrings) (H-) * ``Constructive Problems`` [324.Wiggle-Sort-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/324.Wiggle-Sort-II) (H) [667.Beautiful-Arrangement-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/667.Beautiful-Arrangement-II) (M) @@ -1139,10 +1531,27 @@ [2007.Find-Original-Array-From-Doubled-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2007.Find-Original-Array-From-Doubled-Array) (M) [2122.Recover-the-Original-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2122.Recover-the-Original-Array) (H-) [1982.Find-Array-Given-Subset-Sums](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/1982.Find-Array-Given-Subset-Sums) (H) +[2202.Maximize-the-Topmost-Element-After-K-Moves](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2202.Maximize-the-Topmost-Element-After-K-Moves) (H) +[2498.Frog-Jump-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2498.Frog-Jump-II) (H) +[2499.minimum-total-cost-to-make-arrays-unequal](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2499.minimum-total-cost-to-make-arrays-unequal) (H) 
+[2567.Minimum-Score-by-Changing-Two-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2567.Minimum-Score-by-Changing-Two-Elements) (M) +[2568.Minimum-Impossible-OR](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2568.Minimum-Impossible-OR) (H-) +[2571.Minimum-Operations-to-Reduce-an-Integer-to-0](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2571.Minimum-Operations-to-Reduce-an-Integer-to-0) (H-) +[2573.Find-the-String-with-LCP](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2573.Find-the-String-with-LCP) (H-) +[2576.Find-the-Maximum-Number-of-Marked-Indices](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2576.Find-the-Maximum-Number-of-Marked-Indices) (H-) +[2712.Minimum-Cost-to-Make-All-Characters-Equal](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2712.Minimum-Cost-to-Make-All-Characters-Equal) (H-) +[2732.Find-a-Good-Subset-of-the-Matrix](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2732.Find-a-Good-Subset-of-the-Matrix) (H) +[2749.Minimum-Operations-to-Make-the-Integer-Zero](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2749.Minimum-Operations-to-Make-the-Integer-Zero) (H) +[2745.Construct-the-Longest-New-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2745.Construct-the-Longest-New-String) (H-) +[2753.Count-Houses-in-a-Circular-Street-II](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2753.Count-Houses-in-a-Circular-Street-II) (H-) +[3012.Minimize-Length-of-Array-Using-Operations](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/3012.Minimize-Length-of-Array-Using-Operations) (H-) +3301.Maximize-the-Total-Height-of-Unique-Towers (M) +3397.Maximum Number-of-Distinct-Elements-After-Operations (M) #### [Simulation](https://github.com/wisdompeak/LeetCode/tree/master/Simulation) [2061.Number-of-Spaces-Cleaning-Robot-Cleaned](https://github.com/wisdompeak/LeetCode/tree/master/Simulation/2061.Number-of-Spaces-Cleaning-Robot-Cleaned) (M) [2069.Walking-Robot-Simulation-II](https://github.com/wisdompeak/LeetCode/tree/master/Simulation/2069.Walking-Robot-Simulation-II) (M+) +[2532.Time-to-Cross-a-Bridge](https://github.com/wisdompeak/LeetCode/tree/master/Simulation/2532.Time-to-Cross-a-Bridge) (H) #### [Others](https://github.com/wisdompeak/LeetCode/tree/master/Others)   [007.Reverse-Integer](https://github.com/wisdompeak/LeetCode/tree/master/Others/007.Reverse-Integer) (M) @@ -1154,7 +1563,6 @@ [311.Sparse-Matrix-Multiplication](https://github.com/wisdompeak/LeetCode/tree/master/Others/311.Sparse-Matrix-Multiplication) (M) 168.Excel-Sheet-Column-Title (H) 453.Minimum-Moves-to-Equal-Array-Elements (M) -689.Maximum-Sum-of-3-Non-Overlapping-Subarrays (M+) [782.Transform-to-Chessboard](https://github.com/wisdompeak/LeetCode/tree/master/Others/782.Transform-to-Chessboard) (H+) [466.Count-The-Repetitions](https://github.com/wisdompeak/LeetCode/tree/master/Others/466.Count-The-Repetitions) (H) [810.Chalkboard-XOR-Game](https://github.com/wisdompeak/LeetCode/tree/master/Others/810.Chalkboard-XOR-Game) (H) @@ -1165,12 +1573,11 @@ 825.Friends-Of-Appropriate-Ages (M+) [835.Image-Overlap](https://github.com/wisdompeak/LeetCode/tree/master/Others/835.Image-Overlap) (H) [843.Guess-the-Word](https://github.com/wisdompeak/LeetCode/tree/master/Others/843.Guess-the-Word) (M) -855.Exam-Room (M+) 918.Maximum-Sum-Circular-Subarray (H-) [927.Three-Equal-Parts](https://github.com/wisdompeak/LeetCode/tree/master/Others/927.Three-Equal-Parts) (M) 
[978.Longest-Turbulent-Subarray](https://github.com/wisdompeak/LeetCode/tree/master/Others/978.Longest-Turbulent-Subarray) (H-) -1067.Digit-Count-in-Range (H) 1183.Maximum-Number-of-Ones (H) +[1224.Maximum-Equal-Frequency](https://github.com/wisdompeak/LeetCode/tree/master/Others/1224.Maximum-Equal-Frequency) (H) (aka. 2423. Remove Letter To Equalize Frequency) [1267.Count-Servers-that-Communicate](https://github.com/wisdompeak/LeetCode/tree/master/Others/1267.Count-Servers-that-Communicate) (M+) [1538.Guess-the-Majority-in-a-Hidden-Array](https://github.com/wisdompeak/LeetCode/tree/master/Others/1538.Guess-the-Majority-in-a-Hidden-Array) (M+) [1706.Where-Will-the-Ball-Fall](https://github.com/wisdompeak/LeetCode/tree/master/Others/1706.Where-Will-the-Ball-Fall) (M+) @@ -1183,12 +1590,47 @@ [1997.First-Day-Where-You-Have-Been-in-All-the-Rooms](https://github.com/wisdompeak/LeetCode/tree/master/Others/1997.First-Day-Where-You-Have-Been-in-All-the-Rooms) (H) [2018.Check-if-Word-Can-Be-Placed-In-Crossword](https://github.com/wisdompeak/LeetCode/tree/master/Others/2018.Check-if-Word-Can-Be-Placed-In-Crossword) (M+) [2147.Number-of-Ways-to-Divide-a-Long-Corridor](https://github.com/wisdompeak/LeetCode/tree/master/Others/2147.Number-of-Ways-to-Divide-a-Long-Corridor) (M) +[2337.Move-Pieces-to-Obtain-a-String](https://github.com/wisdompeak/LeetCode/tree/master/Others/2337.Move-Pieces-to-Obtain-a-String) (aka. 777.Swap-Adjacent-in-LR-String) (M+) +[2359.Find-Closest-Node-to-Given-Two-Nodes](https://github.com/wisdompeak/LeetCode/tree/master/Others/2359.Find-Closest-Node-to-Given-Two-Nodes) (M) +[2380.Time-Needed-to-Rearrange-a-Binary-String](https://github.com/wisdompeak/LeetCode/tree/master/Others/2380.Time-Needed-to-Rearrange-a-Binary-String) (H) +[2453.Destroy-Sequential-Targets](https://github.com/wisdompeak/LeetCode/tree/master/Others/2453.Destroy-Sequential-Targets) (M) +[2591.Distribute-Money-to-Maximum-Children](https://github.com/wisdompeak/LeetCode/tree/master/Others/2591.Distribute-Money-to-Maximum-Children) (M+) +[2647.Color-the-Triangle-Red](https://github.com/wisdompeak/LeetCode/tree/master/Others/2647.Color-the-Triangle-Red) (H) +[2718.Sum-of-Matrix-After-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Others/2718.Sum-of-Matrix-After-Queries) (M+) +[2808.Minimum-Seconds-to-Equalize-a-Circular-Array](https://github.com/wisdompeak/LeetCode/tree/master/Others/2808.Minimum-Seconds-to-Equalize-a-Circular-Array) (M+) +[2811.Check-if-it-is-Possible-to-Split-Array](https://github.com/wisdompeak/LeetCode/tree/master/Others/2811.Check-if-it-is-Possible-to-Split-Array) (M+) +[3068.Find-the-Maximum-Sum-of-Node-Values](https://github.com/wisdompeak/LeetCode/tree/master/Others/3068.Find-the-Maximum-Sum-of-Node-Values) (M+) +[3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts](https://github.com/wisdompeak/LeetCode/tree/master/Others/3400.Maximum-Number-of-Matching-Indices-After-Right-Shifts) (M+) +* ``公式变形`` +[2898.Maximum-Linear-Stock-Score](https://github.com/wisdompeak/LeetCode/tree/master/Others/2898.Maximum-Linear-Stock-Score) (M) +* ``Collision`` +[853.Car-Fleet](https://github.com/wisdompeak/LeetCode/tree/master/Others/853.Car-Fleet) (M) +[1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank](https://github.com/wisdompeak/LeetCode/tree/master/Others/1503.Last-Moment-Before-All-Ants-Fall-Out-of-a-Plank) (M) +[2211.Count-Collisions-on-a-Road](https://github.com/wisdompeak/LeetCode/tree/master/Others/2211.Count-Collisions-on-a-Road) (M) 
+[2731.Movement-of-Robots](https://github.com/wisdompeak/LeetCode/tree/master/Others/2731.Movement-of-Robots) (M+) * ``结论转移`` [1685.Sum-of-Absolute-Differences-in-a-Sorted-Array](https://github.com/wisdompeak/LeetCode/tree/master/Others/1685.Sum-of-Absolute-Differences-in-a-Sorted-Array) (M) [2121.Intervals-Between-Identical-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Others/2121.Intervals-Between-Identical-Elements) (M) +[2615.Sum-of-Distances](https://github.com/wisdompeak/LeetCode/tree/master/Others/2615.Sum-of-Distances) (M+) +[3086.Minimum-Moves-to-Pick-K-Ones](https://github.com/wisdompeak/LeetCode/tree/master/Math/3086.Minimum-Moves-to-Pick-K-Ones) (H) +* ``Count Subarray by Element`` +[828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/828.Count-Unique-Characters-of-All-Substrings-of-a-Given-String) (H-) +[907.Sum-of-Subarray-Minimums](https://github.com/wisdompeak/LeetCode/tree/master/Stack/907.Sum-of-Subarray-Minimums) (H-) +[1498.Number-of-Subsequences-That-Satisfy-the-Given-Sum-Condition](https://github.com/wisdompeak/LeetCode/tree/master/Two_Pointers/1498.Number-of-Subsequences-That-Satisfy-the-Given-Sum-Condition) (H-) +[1856.Maximum-Subarray-Min-Product](https://github.com/wisdompeak/LeetCode/tree/master/Stack/1856.Maximum-Subarray-Min-Product) (M+) +[2104.Sum-of-Subarray-Ranges](https://github.com/wisdompeak/LeetCode/tree/master/Stack/2104.Sum-of-Subarray-Ranges) (H-) +[2262.Total-Appeal-of-A-String](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/2262.Total-Appeal-of-A-String) (M+) +[2281.Sum-of-Total-Strength-of-Wizards](https://github.com/wisdompeak/LeetCode/tree/master/Others/2281.Sum-of-Total-Strength-of-Wizards) (H) +[2302.Count-Subarrays-With-Score-Less-Than-K](https://github.com/wisdompeak/LeetCode/tree/master/Others/2302.Count-Subarrays-With-Score-Less-Than-K) (H-) +[2444.Count-Subarrays-With-Fixed-Bounds](https://github.com/wisdompeak/LeetCode/tree/master/Others/2444.Count-Subarrays-With-Fixed-Bounds) (M+) +[2681.Power-of-Heroes](https://github.com/wisdompeak/LeetCode/tree/master/Others/2681.Power-of-Heroes) (H-) +[2763.Sum-of-Imbalance-Numbers-of-All-Subarrays](https://github.com/wisdompeak/LeetCode/tree/master/Others/2763.Sum-of-Imbalance-Numbers-of-All-Subarrays) (H-) +[2818.Apply-Operations-to-Maximize-Score](https://github.com/wisdompeak/LeetCode/tree/master/Others/2818.Apply-Operations-to-Maximize-Score) (H-) +[3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Math/3428.Maximum-and-Minimum-Sums-of-at-Most-Size-K-Subsequences) (M+) * ``扫描线 / 差分数组`` [252.Meeting-Rooms](https://github.com/wisdompeak/LeetCode/tree/master/Others/252.Meeting-Rooms) (M) [253.Meeting-Rooms-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/253.Meeting-Rooms-II) (M+) +[370.Range-Addition](https://github.com/wisdompeak/LeetCode/tree/master/Segment_Tree/370.Range-Addition) (H-) [056.Merge-Intervals](https://github.com/wisdompeak/LeetCode/tree/master/Others/056.Merge-Intervals) (M) [057.Insert-Intervals](https://github.com/wisdompeak/LeetCode/tree/master/Others/057.Insert-Interval) (M) [732.My-Calendar-III](https://github.com/wisdompeak/LeetCode/tree/master/Others/732.My-Calendar-III) (M) @@ -1196,17 +1638,31 @@ [798.Smallest-Rotation-with-Highest-Score](https://github.com/wisdompeak/LeetCode/tree/master/Others/798.Smallest-Rotation-with-Highest-Score) (H) 
[995.Minimum-Number-of-K-Consecutive-Bit-Flips](https://github.com/wisdompeak/LeetCode/tree/master/Others/995.Minimum-Number-of-K-Consecutive-Bit-Flips) (H-) [1094.Car-Pooling](https://github.com/wisdompeak/LeetCode/tree/master/Others/1094.Car-Pooling) (E) -[1109.Corporate-Flight-Bookings](https://github.com/wisdompeak/LeetCode/tree/master/Others/1109.Corporate-Flight-Bookings) (M) +[1109.Corporate-Flight-Bookings](https://github.com/wisdompeak/LeetCode/tree/master/Others/1109.Corporate-Flight-Bookings) (M) +[1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array](https://github.com/wisdompeak/LeetCode/tree/master/Others/1526.Minimum-Number-of-Increments-on-Subarrays-to-Form-a-Target-Array) (H-) [1589.Maximum-Sum-Obtained-of-Any-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Others/1589.Maximum-Sum-Obtained-of-Any-Permutation) (M) [1674.Minimum-Moves-to-Make-Array-Complementary](https://github.com/wisdompeak/LeetCode/tree/master/Others/1674.Minimum-Moves-to-Make-Array-Complementary) (H) [1871.Jump-Game-VII](https://github.com/wisdompeak/LeetCode/tree/master/Others/1871.Jump-Game-VII) (M+) -1893.Check if All the Integers in a Range Are Covered (E) +[1893.Check-if-All-the-Integers-in-a-Range-Are-Covered](https://github.com/wisdompeak/LeetCode/tree/master/Others/1893.Check-if-All-the-Integers-in-a-Range-Are-Covered) (E) [1943.Describe-the-Painting](https://github.com/wisdompeak/LeetCode/tree/master/Others/1943.Describe-the-Painting) (H-) [2015.Average-Height-of-Buildings-in-Each-Segment](https://github.com/wisdompeak/LeetCode/tree/master/Others/2015.Average-Height-of-Buildings-in-Each-Segment) (H-) +[218.The-Skyline-Problem](https://github.com/wisdompeak/LeetCode/blob/master/Segment_Tree/218.The-Skyline-Problem) (H) [2158.Amount-of-New-Area-Painted-Each-Day](https://github.com/wisdompeak/LeetCode/tree/master/Others/2158.Amount-of-New-Area-Painted-Each-Day) (H-) +[2237.Count-Positions-on-Street-With-Required-Brightness](https://github.com/wisdompeak/LeetCode/tree/master/Others/2237.Count-Positions-on-Street-With-Required-Brightness) (M) +[2251.Number-of-Flowers-in-Full-Bloom](https://github.com/wisdompeak/LeetCode/tree/master/Others/2251.Number-of-Flowers-in-Full-Bloom) (M) +[2327.Number-of-People-Aware-of-a-Secret](https://github.com/wisdompeak/LeetCode/tree/master/Others/2327.Number-of-People-Aware-of-a-Secret) (H-) +[2381.Shifting-Letters-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/2381.Shifting-Letters-II) (M) +[2584.Split-the-Array-to-Make-Coprime-Products](https://github.com/wisdompeak/LeetCode/tree/master/Others/2584.Split-the-Array-to-Make-Coprime-Products) (H) +[2617.Minimum-Number-of-Visited-Cells-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Others/2617.Minimum-Number-of-Visited-Cells-in-a-Grid) (H) +[2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero](https://github.com/wisdompeak/LeetCode/tree/master/Others/2772.Apply-Operations-to-Make-All-Array-Elements-Equal-to-Zero) (H-) +[2963.Count-the-Number-of-Good-Partitions](https://github.com/wisdompeak/LeetCode/tree/master/Others/2963.Count-the-Number-of-Good-Partitions) (H-) +[3009.Maximum-Number-of-Intersections-on-the-Chart](https://github.com/wisdompeak/LeetCode/tree/master/Others/3009.Maximum-Number-of-Intersections-on-the-Chart) (H) +[3169.Count-Days-Without-Meetings](https://github.com/wisdompeak/LeetCode/tree/master/Others/3169.Count-Days-Without-Meetings) (M) 
+[3655.XOR-After-Range-Multiplication-Queries-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/3655.XOR-After-Range-Multiplication-Queries-II) (H+) * ``二维差分`` [850.Rectangle-Area-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/850.Rectangle-Area-II) (H) [2132.Stamping-the-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Others/2132.Stamping-the-Grid) (H) +[2536.Increment-Submatrices-by-One](https://github.com/wisdompeak/LeetCode/tree/master/Others/2536.Increment-Submatrices-by-One) (H-) * ``Enumeration`` [479.Largest-Palindrome-Product](https://github.com/wisdompeak/LeetCode/tree/master/Others/479.Largest-Palindrome-Product) (M+) [866.Prime-Palindrome](https://github.com/wisdompeak/LeetCode/tree/master/Others/866.Prime-Palindrome) (H-) @@ -1217,9 +1673,19 @@ [1714.Sum-Of-Special-Evenly-Spaced-Elements-In-Array](https://github.com/wisdompeak/LeetCode/tree/master/Others/1714.Sum-Of-Special-Evenly-Spaced-Elements-In-Array) (H) [1737.Change-Minimum-Characters-to-Satisfy-One-of-Three-Conditions](https://github.com/wisdompeak/LeetCode/tree/master/Others/1737.Change-Minimum-Characters-to-Satisfy-One-of-Three-Conditions) (M+) [2013.Detect-Squares](https://github.com/wisdompeak/LeetCode/tree/master/Others/2013.Detect-Squares) (M+) +[2552.Count-Increasing-Quadruplets](https://github.com/wisdompeak/LeetCode/tree/master/Others/2552.Count-Increasing-Quadruplets) (H-) +[2768.Number-of-Black-Blocks](https://github.com/wisdompeak/LeetCode/tree/master/Others/2768.Number-of-Black-Blocks) (M+) +[2857.Count-Pairs-of-Points-With-Distance-k](https://github.com/wisdompeak/LeetCode/tree/master/Others/2857.Count-Pairs-of-Points-With-Distance-k) (M+) +[3404.Count-Special-Subsequences](https://github.com/wisdompeak/LeetCode/tree/master/Others/3404.Count-Special-Subsequences) (H) +[3447.Assign-Elements-to-Groups-with-Constraints](https://github.com/wisdompeak/LeetCode/tree/master/Others/3447.Assign-Elements-to-Groups-with-Constraints) (M+) +[3628.Maximum-Number-of-Subsequences-After-One-Inserting](https://github.com/wisdompeak/LeetCode/tree/master/Others/3628.Maximum-Number-of-Subsequences-After-One-Inserting) (H-) +[3640.Trionic-Array-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/3640.Trionic-Array-II) (M+) * ``Presum`` [1878.Get-Biggest-Three-Rhombus-Sums-in-a-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Others/1878.Get-Biggest-Three-Rhombus-Sums-in-a-Grid) (M+) [1906.Minimum-Absolute-Difference-Queries](https://github.com/wisdompeak/LeetCode/tree/master/Others/1906.Minimum-Absolute-Difference-Queries) (M+) +[2245.Maximum-Trailing-Zeros-in-a-Cornered-Path](https://github.com/wisdompeak/LeetCode/tree/master/Others/2245.Maximum-Trailing-Zeros-in-a-Cornered-Path) (M) +[2281.Sum-of-Total-Strength-of-Wizards](https://github.com/wisdompeak/LeetCode/tree/master/Others/2281.Sum-of-Total-Strength-of-Wizards) (H) +[2438.Range-Product-Queries-of-Powers](https://github.com/wisdompeak/LeetCode/tree/master/Others/2438.Range-Product-Queries-of-Powers) (M+) * ``2D Presum`` 1314.Matrix-Block-Sum (M) [1292.Maximum-Side-Length-of-a-Square-with-Sum-Less-than-or-Equal-to-Threshold](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1292.Maximum-Side-Length-of-a-Square-with-Sum-Less-than-or-Equal-to-Threshold) (H-) @@ -1229,6 +1695,25 @@ [347.Top-K-Frequent-Elements](https://github.com/wisdompeak/LeetCode/tree/master/Others/347.Top-K-Frequent-Elements) (M+) 
[973.K-Closest-Points-to-Origin](https://github.com/wisdompeak/LeetCode/tree/master/Others/973.K-Closest-Points-to-Origin) (M) [324.Wiggle-Sort-II](https://github.com/wisdompeak/LeetCode/tree/master/Others/324.Wiggle-Sort-II) (H) +* ``Digit counting`` +[233.Number-of-Digit-One](https://github.com/wisdompeak/LeetCode/tree/master/Math/233.Number-of-Digit-One) (H-) +[3007.Maximum-Number-That-Sum-of-the-Prices-Is-Less-Than-or-Equal-to-K](https://github.com/wisdompeak/LeetCode/tree/master/Others/3007.Maximum-Number-That-Sum-of-the-Prices-Is-Less-Than-or-Equal-to-K) (H) +[1067.Digit-Count-in-Range](https://github.com/wisdompeak/LeetCode/tree/master/Others/1067.Digit-Count-in-Range) (H) +[357.Count-Numbers-with-Unique-Digits](https://github.com/wisdompeak/LeetCode/tree/master/Others/357.Count-Numbers-with-Unique-Digits) (M) +[2417.Closest-Fair-Integer](https://github.com/wisdompeak/LeetCode/tree/master/Others/2417.Closest-Fair-Integer) (H-) + +#### [Thinking](https://github.com/wisdompeak/LeetCode/tree/master/Thinking)   +[2860.Happy-Students](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/2860.Happy-Students) (M+) +[2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices) (H-) +[2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment) (H-) +[2939.Maximum-Xor-Product](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/2939.Maximum-Xor-Product) (H-) +[2957.Remove-Adjacent-Almost-Equal-Characters](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/2957.Remove-Adjacent-Almost-Equal-Characters) (M) +[330.Patching-Array](https://github.com/wisdompeak/LeetCode/tree/master/Greedy/330.Patching-Array) (H) +[1798.Maximum-Number-of-Consecutive-Values-You-Can-Make](https://github.com/wisdompeak/LeetCode/blob/master/Greedy/1798.Maximum-Number-of-Consecutive-Values-You-Can-Make) (H-) +[2952.Minimum-Number-of-Coins-to-be-Added](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/2952.Minimum-Number-of-Coins-to-be-Added) (H-) +[3609.Minimum-Moves-to-Reach-Target-in-Grid](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/3609.Minimum-Moves-to-Reach-Target-in-Grid) (H) +[3644.Maximum-K-to-Sort-a-Permutation](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/3644.Maximum-K-to-Sort-a-Permutation) (H) +[3660.Jump-Game-IX](https://github.com/wisdompeak/LeetCode/tree/master/Thinking/3660.Jump-Game-IX) (H) #### [LeetCode Cup](https://github.com/wisdompeak/LeetCode/tree/master/LCCUP) [LCP23.魔术排列](https://github.com/wisdompeak/LeetCode/tree/master/LCCUP/2020Fall/LCP23.%E9%AD%94%E6%9C%AF%E6%8E%92%E5%88%97) @@ -1246,8 +1731,11 @@ [Inverse_Element](https://github.com/wisdompeak/LeetCode/tree/master/Template/Inverse_Element) [Graph](https://github.com/wisdompeak/LeetCode/tree/master/Template/Graph) [Bit_Manipulation](https://github.com/wisdompeak/LeetCode/tree/master/Template/Bit_manipulation) -[Combination-Number](https://github.com/wisdompeak/LeetCode/tree/master/Template/Combination-Number) -[RB_Tree](https://github.com/wisdompeak/LeetCode/tree/master/Template/RB_Tree) -[二维子矩阵求和](https://github.com/wisdompeak/LeetCode/tree/master/Template/Sub_Rect_Sum_2D) +[RB_Tree](https://github.com/wisdompeak/LeetCode/tree/master/Template/RB_Tree) +[Binary_Lift](https://github.com/wisdompeak/LeetCode/tree/master/Template/Binary_Lift) 
+[Union_Find](https://github.com/wisdompeak/LeetCode/tree/master/Template/Union_Find) +[二维子矩阵求和](https://github.com/wisdompeak/LeetCode/tree/master/Template/Sub_Rect_Sum_2D) [二维差分数组](https://github.com/wisdompeak/LeetCode/tree/master/Template/Diff_Array_2D) [CPP_LANG](https://github.com/wisdompeak/LeetCode/tree/master/Template/CPP_LANG) + +#### [SQL](https://github.com/wisdompeak/LeetCode/tree/master/SQL) diff --git a/Recursion/087.Scramble-String/087.Scramble-String.cpp b/Recursion/087.Scramble-String/087.Scramble-String.cpp index a9a6c2a8a..854ab8ba1 100644 --- a/Recursion/087.Scramble-String/087.Scramble-String.cpp +++ b/Recursion/087.Scramble-String/087.Scramble-String.cpp @@ -1,25 +1,54 @@ class Solution { + int memo[31][31][31]; + int n; + string s1, s2; public: - bool isScramble(string s1, string s2) + bool isScramble(string s1, string s2) { - int n=s1.size(); - if (n==1) return (s1==s2); + n = s1.size(); + this->s1 = s1; + this->s2 = s2; - string temp1=s1; - sort(temp1.begin(),temp1.end()); - string temp2=s2; - sort(temp2.begin(),temp2.end()); - if (temp1!=temp2) - return false; + for (int i=0; i& piles) { int n = piles.size(); - for (int i=0; i<=100; i++) - for (int j=0; j<=100; j++) - dp[i][j] = 0; - sufsum[n] = 0; + suf[n] = 0; for (int i=n-1; i>=0; i--) - sufsum[i] = sufsum[i+1]+piles[i]; + suf[i] = suf[i+1]+piles[i]; + return solve(0, 1, piles); } - + int solve(int i, int M, vector& piles) { if (i==piles.size()) return 0; - if (dp[i][M]!=0) return dp[i][M]; - + if (dp[i][M]!=0) + return dp[i][M]; + + int sum = 0; for (int x=1; x<=2*M; x++) { - if (i+x>piles.size()) break; - dp[i][M] = max(dp[i][M], sufsum[i] - solve(i+x, max(x,M), piles)); + if (i+x-1>=piles.size()) break; + sum += piles[i+x-1]; + dp[i][M] = max(dp[i][M], sum + suf[i+x] - solve(i+x, max(x,M), piles)); } return dp[i][M]; } diff --git a/Recursion/1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n/Readme.md b/Recursion/1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n/Readme.md index 4bd11e3cc..2757aebc5 100644 --- a/Recursion/1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n/Readme.md +++ b/Recursion/1415.The-k-th-Lexicographical-String-of-All-Happy-Strings-of-Length-n/Readme.md @@ -6,6 +6,7 @@ #### 解法2 更聪明点的递归。 -当我们尝试填写长度为n的字符串的首字母时,无论首字母是什么,之后的n-1位都有pow(2,n-1)种填写方法。所以我们用k/pow(2,n-1)就可以确定此时的首字母ch应该是字母表的第几个。注意这里的k应该用0-index更为方便。比如k=0,那么ch应该就是'a',如果k=1,那么ch应该就是'b'. +当我们尝试填写长度为n的字符串的首字母时,无论首字母是什么,之后的n-1位都有pow(2,n-1)种填写方法。所以我们用`t = k/pow(2,n-1)`就可以确定此时的首字母ch应该是字母表的第几个。注意这里的k和t都用0-index更为方便。比如t=0,那么ch应该就是'a',如果t=1,那么ch应该就是'b'. + +但是我们还需要考虑到之前一位的制约。如果发现计算得到的ch比上一位字母要大,那么意味着实际填写的ch还需要再加1。比如,上一个位置是'a',本轮计算得到`t=1`,意味着我们需要跳过`2^(n-1)`种排列。但注意这`2^(n-1)`种排列并不是对应的`axx..xx`,因为它与上一个位置'a'冲突。所以我们只能认为这`2^(n-1)`种排列对应的是`bxx..xx`。故跳过他们之后,我们认为本位置必须是填写`c`. 
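+
+下面给出一个示意性的代码草稿(函数签名 `getHappyString(int n, int k)` 按题目原型假设,并非本仓库收录的正式解法),用来演示上述“按块定位首字母 + 跳过与前一位相同的字母”的计算过程:
+```
+class Solution {
+public:
+    string getHappyString(int n, int k) {
+        int total = 3 * (1 << (n - 1));    // happy string 总数为 3*2^(n-1)
+        if (k > total) return "";
+        k--;                               // 转成 0-index
+
+        string res;
+        int block = 1 << (n - 1);          // 首字母确定后,后面 n-1 位共有 2^(n-1) 种
+        res += 'a' + k / block;
+        k %= block;
+
+        for (int i = 1; i < n; i++) {
+            block >>= 1;                   // 之后每一位只有两种选择,排列数减半
+            char ch = 'a' + k / block;
+            k %= block;
+            if (ch >= res.back()) ch++;    // 跳过与前一位相同的字母,即上文所说的“再加1”
+            res += ch;
+        }
+        return res;
+    }
+};
+```
+例如 n=3, k=9 时,上述过程依次得到 'c'、'a'、'b',即样例答案 "cab"。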
-但是我们还需要考虑到之前一位的制约。如果发现计算得到的ch比上一位字母要大,那么意味着当前字母基数应该加1。因为此位我们不能尝试和前面一样的字母,所以会少pow(2,n-1)的可能性。 diff --git a/Recursion/1545.Find-Kth-Bit-in-Nth-Binary-String/1545.Find-Kth-Bit-in-Nth-Binary-String.cpp b/Recursion/1545.Find-Kth-Bit-in-Nth-Binary-String/1545.Find-Kth-Bit-in-Nth-Binary-String.cpp new file mode 100644 index 000000000..7cf549c8a --- /dev/null +++ b/Recursion/1545.Find-Kth-Bit-in-Nth-Binary-String/1545.Find-Kth-Bit-in-Nth-Binary-String.cpp @@ -0,0 +1,28 @@ +class Solution { + vectorlen; +public: + char findKthBit(int n, int k) + { + len.resize(n+1); + len[1] = 1; + for (int i=2; i<=n; i++) + len[i] = len[i-1]*2+1; + + return dfs(n, k); + } + + char dfs(int n, int k) + { + if (n==1) return '0'; + if (k==len[n]/2+1) return '1'; + if (k>children[100005]; + LL memo[100005][2]; +public: + long long maxScore(vector>& edges) + { + int n = edges.size(); + int root = -1; + for (int i=0; inext[55]; + int n; + int count[55]; + int plan0[55]; + int plan1[55]; + int val[55]; +public: + int minimumTotalPrice(int n, vector>& edges, vector& price, vector>& trips) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + for (int i=0; i=min_sum && digitSum<=max_sum) ret = (ret+1)%M; + return ret; + } + + int calculate(string& s) + { + int ret = 0; + for (auto ch:s) ret += ch-'0'; + return ret; + } + + LL CountNoGreater(string num, int max_sum) + { + vector>>memo(2, vector>(25, vector(405, -1))); + return dfs(num, max_sum, 0, 0, true, memo); + } + + LL dfs(string num, int max_sum, int i, int sum, bool isSame, vector>>&memo) + { + if (sum > max_sum) return 0; + if (memo[isSame][i][sum]!=-1) return memo[isSame][i][sum]; + if (i==num.size()) return 1; + + LL ret = 0; + if (!isSame) + { + for (int k=0; k<=9; k++) + { + ret += dfs(num, max_sum, i+1, sum+k, false, memo); + ret %= M; + } + } + else + { + for (int k=0; k<(num[i]-'0'); k++) + { + ret += dfs(num, max_sum, i+1, sum+k, false, memo); + ret %= M; + } + ret += dfs(num, max_sum, i+1, sum+(num[i]-'0'), true, memo); + ret %= M; + } + + memo[isSame][i][sum] = ret; + return ret; + } +}; + + diff --git a/Recursion/2719.Count-of-Integers/Readme.md b/Recursion/2719.Count-of-Integers/Readme.md new file mode 100644 index 000000000..e578450c2 --- /dev/null +++ b/Recursion/2719.Count-of-Integers/Readme.md @@ -0,0 +1,15 @@ +### 2719.Count-of-Integers + +求介于两个范围[low, high]之间的、符合条件的元素个数,一个非常常见的套路,就是只写一个求不高于某上界、符合条件的元素个数`NoGreaterThan`.这样答案就是`NoGreaterThan(high)-NoGreaterThan(low-1)`. 
+ +本题有两个不同类型的范围限制,数值大小的范围和digitsum的范围。我们用同样的套路,写函数`NoGreaterThan(string num, int max_sum)`,求数值上不超过num,digitSum不超过max_sum的元素个数,这样最终答案就是 +``` +return (NoGreaterThan(num2, max_sum)-NoGreaterThan(num2, min_sum-1)) + - (NoGreaterThan(num1-1, max_sum)-NoGreaterThan(num1-1, min_sum-1)); +``` + +在编写`NoGreaterThan`的时候,我们递归考察num的每个位置,尝试可以填写哪些digits。用记忆化来避免重复的函数调用。 + +其中一个比较重要的逻辑就是,如果我们给前i位设置的digits比num对应前缀要小,那么第i位上我们可以任意设置0~9都可以满足要求(即不超过num)。反之,如果给前i位设置的digits与num的对应前缀完全吻合,那么在第i位上的设置就不能超过num[i](否则就超过了num)。所以递归的时候我们需要有一个bool量的标记,表示在处理当前位i之前,我们是否设置了完全与num前缀相同的digits。 + +此外,对于cpp而言,我们比较难直接得到num-1的字符串形式。技巧是我们将num1单独处理即可。 diff --git a/Recursion/2801.Count-Stepping-Numbers-in-Range/2801.Count-Stepping-Numbers-in-Range.cpp b/Recursion/2801.Count-Stepping-Numbers-in-Range/2801.Count-Stepping-Numbers-in-Range.cpp new file mode 100644 index 000000000..1aa77f62c --- /dev/null +++ b/Recursion/2801.Count-Stepping-Numbers-in-Range/2801.Count-Stepping-Numbers-in-Range.cpp @@ -0,0 +1,78 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int countSteppingNumbers(string low, string high) + { + LL ret = helper(high) - helper(low); + ret = (ret + M) % M; + ret = (ret + check(low) + M) % M; + + return ret; + } + + bool check(string s) + { + for (int i=1; i= 0) + ret = (ret + dfs(len-1, prev-1, false, num, memo)) % M; + } + else + { + int D = num[n-len] - '0'; + if (prev+1 < D) + ret += dfs(len-1, prev+1, false, num, memo); + else if (prev+1 == D) + ret += dfs(len-1, prev+1, true, num, memo); + ret %= M; + + if (prev-1 >= 0 && prev-1 < D) + ret += dfs(len-1, prev-1, false, num, memo); + else if (prev-1 >= 0 && prev-1 == D) + ret += dfs(len-1, prev-1, true, num, memo); + ret %= M; + } + + memo[len][prev][isSame] = ret; + return ret; + } +}; diff --git a/Recursion/2801.Count-Stepping-Numbers-in-Range/Readme.md b/Recursion/2801.Count-Stepping-Numbers-in-Range/Readme.md new file mode 100644 index 000000000..ce7f3d2d0 --- /dev/null +++ b/Recursion/2801.Count-Stepping-Numbers-in-Range/Readme.md @@ -0,0 +1,13 @@ +### 2801.Count-Stepping-Numbers-in-Range + +首先依据套路转化为前缀之差的形式:`return helper(high) - helper(low) + check(low)`. 其中helper(num)表示求[1,num]区间内符合要求的数的个数。 + +我们用dfs的方法来这个填充每一位。设计`dfs(len, prev, isSame)`表示在当前“状态”最终会有多少个合法的数字,其中len表示还有多少位需要填充,prev表示上一位填充的数字是什么,isSame表示之前填充的所有数字是否与num的前缀贴合。 + +1. 如果isSame==false,那么只要`prev+1<=9`,那么就可以在当前位填充prev+1;只要`prev-1>=0`,那么就可以在当前位填充prev-1. 递归函数里的isSame都是false。 + +2. 
如果isSame==true,令当前位置上num的数字是D,那么只要`prev+1=0 && prev-1k = k; + return helper(high, k) - helper(low-1, k); + } + + int helper(LL num, int k) + { + int memo[11][2][22][22]; + memset(memo, -1, sizeof(memo)); + + string Num = to_string(num); + int n = Num.size(); + + int ret = 0; + for (int len = 2; len < n; len+=2) + { + for (int d=1; d<=9; d++) + ret += dfs(len-1, false, (d%2==0)*2-1, d%k, Num, memo); + } + + if (n%2==0) + { + int D = Num[0]-'0'; + for (int d=1; d=x) return y-x; + + if (memo[x]!=0) return memo[x]; + + int ret = INT_MAX/2; + ret = min(ret, minimumOperationsToMakeEqual( (x-(x%11))/11, y) + x%11+1); + ret = min(ret, minimumOperationsToMakeEqual( (x+ (11-x%11))/11, y) + (11-x%11) + 1); + + ret = min(ret, minimumOperationsToMakeEqual( (x-(x%5))/5, y) + x%5+1); + ret = min(ret, minimumOperationsToMakeEqual( (x+(5-x%5))/5, y) + (5-x%5)+1); + + ret = min(ret, x-y); + + memo[x] = ret; + + return ret; + } +}; diff --git a/Recursion/2998.Minimum-Number-of-Operations-to-Make-X-and-Y-Equal/Readme.md b/Recursion/2998.Minimum-Number-of-Operations-to-Make-X-and-Y-Equal/Readme.md new file mode 100644 index 000000000..7116e1b72 --- /dev/null +++ b/Recursion/2998.Minimum-Number-of-Operations-to-Make-X-and-Y-Equal/Readme.md @@ -0,0 +1,10 @@ +### 2998.Minimum-Number-of-Operations-to-Make-X-and-Y-Equal + +因为除法操作最高效,所有的增减操作都是为了能够凑出除法操作。所以当x>y时,我们想要将x拉低至y,只需要考虑以下五种操作: +1. 增加x,使得x能被11整除 +2. 减小x,使得x能被11整除 +3. 增加x,使得x能被5整除 +4. 减小x,使得x能被5整除 +5. 直接将x与y拉平。 + +此外,本题需要记忆化来提升效率。 diff --git a/Recursion/2999.Count-the-Number-of-Powerful-Integers/2999.Count-the-Number-of-Powerful-Integers.cpp b/Recursion/2999.Count-the-Number-of-Powerful-Integers/2999.Count-the-Number-of-Powerful-Integers.cpp new file mode 100644 index 000000000..c345b0ae5 --- /dev/null +++ b/Recursion/2999.Count-the-Number-of-Powerful-Integers/2999.Count-the-Number-of-Powerful-Integers.cpp @@ -0,0 +1,43 @@ +using LL = long long; +class Solution { +public: + long long numberOfPowerfulInt(long long start, long long finish, int limit, string s) + { + return helper(to_string(finish), limit, s) - helper(to_string(start-1), limit, s); + } + + LL helper(string a, int limit, string s) + { + if (a.size() < s.size()) return 0; + return dfs(a, s, limit, 0, true); + } + + LL dfs(string a, string s, int limit, int k, bool same) + { + if (a.size() - k == s.size()) + { + int len = s.size(); + if (!same || a.substr(a.size()-len, len) >= s) return 1; + else return 0; + } + + LL ret = 0; + if (!same) + { + int d = a.size()-s.size()-k; + ret = pow(1+limit, d); + } + else + { + for (int i=0; i<=limit; i++) + { + if (i > a[k]-'0') break; + else if (i == a[k]-'0') + ret += dfs(a, s, limit, k+1, true); + else + ret += dfs(a, s, limit, k+1, false); + } + } + return ret; + } +}; diff --git a/Recursion/2999.Count-the-Number-of-Powerful-Integers/Readme.md b/Recursion/2999.Count-the-Number-of-Powerful-Integers/Readme.md new file mode 100644 index 000000000..fc0cc3ebf --- /dev/null +++ b/Recursion/2999.Count-the-Number-of-Powerful-Integers/Readme.md @@ -0,0 +1,7 @@ +### 2999.Count-the-Number-of-Powerful-Integers + +首先,对于区间内的计数,常用的技巧就是转化为`helper(to_string(finish), limit, s) - helper(to_string(start-1), limit, s)`,其中`helper(string a, int limit, string s)`表示在[1:a]区间内有多少符合条件的数(即每个digit不超过limit且后缀为s)。 + +接下来写helper函数。令上限a的长度为d,那么我们计数的时候只需要逐位填充、循环d次即可。对于第k位而言,分两种情况: +1. 如果填充的前k-1位小于a同样长度的前缀,那么第k位可以任意填充0 ~ limit都不会超过上限a。甚至从第k+1位起,直至固定的后缀s之前,总共有`d = a.size()-s.size()-k`位待填充的数字,都可以任意填充为0~limit。故直接返回计数结果:`pow(1+limit, d)`. +2. 
如果填充的前k-1位等于a同样长度的前缀,那么第k位可以填充为0 ~ min(limit, a[k])。确定之后,接下来递归处理下一位即可。注意,如果填充为a[k]的话,需要告知递归函数“已构造的前缀继续与a相同”,否则告知递归函数“已构造的前缀小于a”。这样下一轮递归函数知道选择哪一个分支。 diff --git a/Recursion/3307.Find-the-K-th-Character-in-String-Game-II/3307.Find-the-K-th-Character-in-String-Game-II.cpp b/Recursion/3307.Find-the-K-th-Character-in-String-Game-II/3307.Find-the-K-th-Character-in-String-Game-II.cpp new file mode 100644 index 000000000..5bce51cfe --- /dev/null +++ b/Recursion/3307.Find-the-K-th-Character-in-String-Game-II/3307.Find-the-K-th-Character-in-String-Game-II.cpp @@ -0,0 +1,31 @@ +using LL = long long; +class Solution { +public: + char kthCharacter(long long k, vector& operations) + { + LL n = 1; + int t = 0; + while (n=0; i--) + { + if (k>n/2) + { + if (operations[i]==0) + k = k-n/2; + else + { + k = k-n/2; + count++; + } + } + n/=2; + } + return 'a' + (count) % 26; + } +}; diff --git a/Recursion/3307.Find-the-K-th-Character-in-String-Game-II/Readme.md b/Recursion/3307.Find-the-K-th-Character-in-String-Game-II/Readme.md new file mode 100644 index 000000000..03d131228 --- /dev/null +++ b/Recursion/3307.Find-the-K-th-Character-in-String-Game-II/Readme.md @@ -0,0 +1,5 @@ +### 3307.Find-the K-th-Character-in-String-Game-II + +假设当前总共有n个字符,求其中的第k个。显然,如果k是在前n/2里,那么等效于求`dfs(n/2,k)`。如果是在后半部分,那么它其实是前半部分里第`k-n/2`个字符shift零次或一次(取决于operation类型)之后的结果,即等效于`dfs(n/2,k-n/2)+1`. 以此递归处理即可。时间就是logN. + +此题类似 1545.Find-Kth-Bit-in-Nth-Binary-String diff --git a/Recursion/3490.Count-Beautiful-Numbers/3490.Count-Beautiful-Numbers.cpp b/Recursion/3490.Count-Beautiful-Numbers/3490.Count-Beautiful-Numbers.cpp new file mode 100644 index 000000000..0891090f8 --- /dev/null +++ b/Recursion/3490.Count-Beautiful-Numbers/3490.Count-Beautiful-Numbers.cpp @@ -0,0 +1,47 @@ +using State = tuple; + +class Solution { +public: + map memo; + vector digits; + + int dfs(int pos, int sum, int product, bool tight, bool leading_zero) + { + if (pos == digits.size()) { + return (sum > 0) && (product % sum == 0); + } + + State key = {pos, sum, product, tight, leading_zero}; + if (memo.find(key) != memo.end()) return memo[key]; + + int limit = (tight ? digits[pos] : 9); + int res = 0; + + for (int d = 0; d <= limit; d++) + { + res += dfs(pos + 1, sum + d, (leading_zero && d == 0) ? 1 : product * d, tight && (d == limit), leading_zero && (d == 0)); + } + + return memo[key] = res; + } + + int count(int T) + { + if (T <= 0) return 0; + digits.clear(); + memo.clear(); + + while (T > 0) + { + digits.push_back(T % 10); + T /= 10; + } + reverse(digits.begin(), digits.end()); + return dfs(0, 0, 1, true, true); + } + + int beautifulNumbers(int l, int r) + { + return count(r) - count(l-1); + } +}; diff --git a/Recursion/3490.Count-Beautiful-Numbers/Readme.md b/Recursion/3490.Count-Beautiful-Numbers/Readme.md new file mode 100644 index 000000000..542d37d94 --- /dev/null +++ b/Recursion/3490.Count-Beautiful-Numbers/Readme.md @@ -0,0 +1,20 @@ +### 3490.Count-Beautiful-Numbers + +常见的数位DP或者递归搜索。本质我们需要设计一个函数count(T)来记录0-T之间所有符合条件的数。 + +因为beautiful number最多只有10位,每个位置最多只有0-9共十种填法,我们可以逐位搜索。搜索过程中,第pos个位置上的可选决策受到两个先前状态的制约: +1. 该位置是否贴近上限T。如果pos之前的选择都是贴着上限T,那么在第pos位上,我们的选择上限也只能是T[pos],否则上限可以是9. +2. 
该位置是否是先导零。如果pos之前的选择全部都是0,那么在pos位置之前记录的乘积应该强制认作是1。这么做是为了处理这样一种情况:pos之前都是0,并且pos位也想取零。如果没有这条规则,那么递归到后面的乘积就永远是零了。 + +由此,我们一旦做出了pos位置上的决策,在往后递归的时候,也需要相应更新isTight和isLeadingZero这两个状态。 + +递归需要记忆化的支持。本题记忆化的状态就是递归函数的参数:pos, sum, product, isTight, isLeadingZero。我们可以用tuple作为key,加上有序map来存储访问过的状态。 + +有人会问product的个数会不会很大?事实上9个digit想乘,可以得到的不同的乘积并不大。 +``` +st = {1} # 空集的乘积(乘法单位元) +for _ in range(9): # 9 个数相乘 + st = set(x * d for x in st for d in range(10)) # 每个数从 0 到 9 +print(len(st)) # 3026 +``` +总的记忆化状态数目最多`9*81*3000*2*2=8748000`,恰好可以接受。 diff --git a/Recursion/3614.Process-String-with-Special-Operations-II/3614.Process-String-with-Special-Operations-II.cpp b/Recursion/3614.Process-String-with-Special-Operations-II/3614.Process-String-with-Special-Operations-II.cpp new file mode 100644 index 000000000..c3ff18731 --- /dev/null +++ b/Recursion/3614.Process-String-with-Special-Operations-II/3614.Process-String-with-Special-Operations-II.cpp @@ -0,0 +1,44 @@ +using ll = long long; +class Solution { +public: + char processStr(string s, long long k) { + k++; + int n = s.size(); + s = "#"+s; + + vectorlen(n+1); + for (int i=1; i<=n; i++) { + char c = s[i]; + if ('a'<=c && c<='z') { + len[i] = len[i-1]+1; + } else if (c=='*') { + len[i] = len[i-1]==0? 0: len[i-1]-1; + } else if (c=='#') { + len[i] = len[i-1] * 2; + } else if (c=='%') { + len[i] = len[i-1]; + } + } + if (k>len[n] || k==0) return '.'; + + for (int t=n; t>=1; t--) { + char c = s[t]; + ll before = len[t-1]; + ll after = len[t]; + + if ('a'<=c && c<='z') { + if (k==after) + return c; + } else if (c=='*') { + k = k; + } else if (c=='#') { + if (k> before) + k = k-before; + } else if (c=='%') { + k = before+1-k; + } + } + + return '.'; + } +}; diff --git a/Recursion/3614.Process-String-with-Special-Operations-II/Readme.md b/Recursion/3614.Process-String-with-Special-Operations-II/Readme.md new file mode 100644 index 000000000..37f9e8776 --- /dev/null +++ b/Recursion/3614.Process-String-with-Special-Operations-II/Readme.md @@ -0,0 +1,13 @@ +### 3614.Process-String-with-Special-Operations-II + +很显然,构造完全之后的字符串非常长,我们不可能在此基础上定位第k个元素。此题极大概率是递归反推。 + +我们分类思考每种操作下的第k个字符(注意是1-index)是什么情况: +1. 添加一个字符,使得长度从a变成了a+1. 如果k=a+1,那么答案就是最新添加的字符。否则,递归求原字符串里的第k个。 +2. 删减最后一个字符,使得长度从a变成了a-1. 这种情况下k不可能是a(否则无解),因此等效于递归求原字符串里的第k个。 +3. 将字符串copy+append,使得长度从a变成了2a. 显然如果k<=a,递归求原字符串里的第k个。如果k>a,递归求原字符串里的第k-a个。 +4. 将字符串反转,长度依然是k。显然,递归求原字符串里的第a+1-k个。 + +综上,只要知道每个回合的操作和长度变化,我们就可以把“求当前字符串的第k个元素”,逆推为“求前一个回合字符串的第k'个元素”,其中k'的计算如上。 + +注意,这个题可能无解。逆推的前提是正向的变化合法存在。所以我们必须先正向走一遍,记录下每个回合的字符串长度,最后检查k是否在最终长度的范围内。 diff --git a/Recursion/440.K-th-Smallest-in-Lexicographical-Order/440.K-th-Smallest-in-Lexicographical-Order.cpp b/Recursion/440.K-th-Smallest-in-Lexicographical-Order/440.K-th-Smallest-in-Lexicographical-Order.cpp deleted file mode 100644 index c0d853bbf..000000000 --- a/Recursion/440.K-th-Smallest-in-Lexicographical-Order/440.K-th-Smallest-in-Lexicographical-Order.cpp +++ /dev/null @@ -1,49 +0,0 @@ -class Solution { - -public: - int findKthNumber(int n, int k) - { - return FindKthNumberBeginWith(0,n,k); - } - - // return the Lexicographically Kth element that begin with the prefix - // excluding the prefix itself - int FindKthNumberBeginWith(int prefix, int n, int k) - { - if (k==0) return prefix; - - for (int i=(prefix==0?1:0); i<=9; i++) - { - int count = TotalNumbersBeginWith(prefix*10+i,n); - if (countk的话,我们就确定了首元素必须是1,进而考虑第二个数字,也是从1的可能性考虑起--我们发现,这就是在递归重复之前的步骤. 
- -代码的流程大致如下: -```cpp -int FindKthNumberBeginWith(prefix,k) -{ - if (k==0) return prefix; - - for i=0 to 9 - { - count = TotalNumbersBeginWith(prefix+[i]); - if (countbitArr; // Note: all arrays are 1-index + vectornums; long long M = 1e9+7; - // increase nums[i] by delta (1-index) + void init(int N) + { + this->N = N; + bitArr.resize(N+1); + nums.resize(N+1); + } + + // increase nums[i] by delta void updateDelta(int i, long long delta) { int idx = i; - while (idx <= MAX_N) + while (idx <= N) { bitArr[idx]+=delta; bitArr[idx] %= M; @@ -18,7 +23,7 @@ class Solution { } } - // sum of a range nums[1:j] inclusively, 1-index + // sum of a range nums[1:j] inclusively long long queryPreSum(int idx){ long long result = 0; while (idx){ @@ -32,23 +37,29 @@ class Solution { // sum of a range nums[i:j] inclusively long long sumRange(int i, int j) { return queryPreSum(j)-queryPreSum(i-1); - } - + } +}; + +using LL = long long; +LL OFFSET = 1e5+10; +LL M = 1e9+7; +class Solution { public: int subarraysWithMoreZerosThanOnes(vector& nums) { - cout<> getSkyline(vector>& buildings) + vector> getSkyline(vector>& buildings) { - vector>edges; - for (int i=0;i>>Map; // pos->{height, flag} + for (auto building: buildings) { - edges.push_back({buildings[i][0],-buildings[i][2]}); - edges.push_back({buildings[i][1],buildings[i][2]}); + Map[building[0]].push_back({building[2], 1}); + Map[building[1]].push_back({building[2], -1}); } - sort(edges.begin(),edges.end()); - - multisetSet={0}; - vector>results; - int cur=0; - - for (int i=0; iSet; + vector>rets; + for (auto& [pos, pairs]: Map) { - if (edges[i][1]<0) - Set.insert(-edges[i][1]); - else - Set.erase(Set.lower_bound(edges[i][1])); - - int H=*Set.rbegin(); - if (cur!=H) - results.push_back({edges[i][0],H}); - cur=H; + for (auto& [height, flag]: pairs) + { + if (flag == 1) + Set.insert(height); + else + Set.erase(Set.find(height)); + } + + int H = Set.empty() ? 
0: *Set.rbegin(); + if (rets.empty() || rets.back()[1]!=H) + rets.push_back({pos, H}); } - - return results; + + return rets; } }; diff --git a/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegTree_v1.cpp b/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegTree_v1.cpp new file mode 100644 index 000000000..63c87774c --- /dev/null +++ b/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegTree_v1.cpp @@ -0,0 +1,81 @@ +// 支持动态开点 + +class SegTree +{ + public: + int start,end,status; + SegTree* left; + SegTree* right; + SegTree(int a, int b, int s):start(a),end(b),status(s),left(NULL),right(NULL){} + + void remove(SegTree* &node) + { + if (node==NULL) return; + remove(node->left); + remove(node->right); + delete node; + node = NULL; + return; + } + + void setStatus(int a, int b, int s) + { + if (a>=end || b<=start) + return; + if (a<=start && b>=end && s>=status) + { + remove(left); + remove(right); + status = s; + return; + } + if (a<=start && b>=end && ssetStatus(a,b,s); + right->setStatus(a,b,s); + status = max(left->status,right->status); + } +}; + +class Solution { +public: + vector>results; + vector> getSkyline(vector>& buildings) + { + if (buildings.size()==0) return {}; + + SegTree* root = new SegTree(0,INT_MAX,0); + for (auto q:buildings) + root->setStatus(q[0],q[1],q[2]); + + DFS(root); + if (results.back()[1]!=0) results.push_back({INT_MAX,0}); + + vector>filteredResults; + for (auto p: results) + { + if (filteredResults.size()!=0 && p[1]==filteredResults.back()[1]) + continue; + filteredResults.push_back({p[0],p[1]}); + } + if (filteredResults.size()!=0 && filteredResults[0][1]==0) filteredResults.erase(filteredResults.begin()); + return filteredResults; + } + + void DFS(SegTree* node) + { + if (node->left==NULL) + results.push_back({node->start,node->status}); + else + { + DFS(node->left); + DFS(node->right); + } + } +}; diff --git a/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegTree_v2.cpp b/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegTree_v2.cpp new file mode 100644 index 000000000..2327a837c --- /dev/null +++ b/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegTree_v2.cpp @@ -0,0 +1,141 @@ +// 线段树大小在初始化时固定。支持Lazy Tag(延迟标记) + +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN; // check with your own logic + } + if (a <= start && end <=b) + { + 
return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +class Solution { + vector>height; // {idx, h} +public: + vector> getSkyline(vector>& buildings) + { + setSet; + for (auto & building: buildings) + { + Set.insert(building[0]); + Set.insert(building[1]); + } + int id = 0; + unordered_mappos2idx; + unordered_mapidx2pos; + for (auto x:Set) + { + pos2idx[x] = id; + idx2pos[id] = x; + id++; + } + + int n = pos2idx.size(); + SegTreeNode* root = new SegTreeNode(0, n-1, 0); + + sort(buildings.begin(), buildings.end(), [](vector&a, vector&b){return a[2]updateRange(pos2idx[building[0]], pos2idx[building[1]]-1, building[2]); + } + + DFS(root); + + vector>rets; + for (int i=0; istart==node->end || node->tag==1) + { + height.push_back({node->start, node->info}); + return; + } + DFS(node->left); + DFS(node->right); + } + +}; diff --git a/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegmentTree_lazyTag.cpp b/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegmentTree_lazyTag.cpp deleted file mode 100644 index 861a04078..000000000 --- a/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_SegmentTree_lazyTag.cpp +++ /dev/null @@ -1,124 +0,0 @@ -class Solution { - class SegTreeNode - { - public: - SegTreeNode* left; - SegTreeNode* right; - int start, end; - int info; - int tag; - SegTreeNode(int a, int b):start(a),end(b),info(0),tag(0),left(NULL),right(NULL){} - }; - - void init(SegTreeNode* node, int a, int b) // init for range [a,b] - { - if (a==b) - { - node->info = 0; - return; - } - int mid = (a+b)/2; - if (node->left==NULL) - { - node->left = new SegTreeNode(a, mid); - node->right = new SegTreeNode(mid+1, b); - } - init(node->left, a, mid); - init(node->right, mid+1, b); - - node->info = 0; // write your own logic - } - - void updateRange(SegTreeNode* node, int a, int b, int val) - { - if (b < node->start || a > node->end ) - return; - if (node->start == node->end) - { - node->info = max(node->info, val); - return; - } - if (a <= node->start && node->end <=b && val >= node->info) - { - // write your own logic - node->info = val; - node->tag = 1; - return; - } - - pushDown(node); - node->info = max(node->info, val); - - updateRange(node->left, a, b, val); - updateRange(node->right, a, b, val); - } - - - void pushDown(SegTreeNode* node) - { - if (node->tag!=0) - { - node->left->info = node->info; - node->right->info = node->info; - node->left->tag = 1; - node->right->tag = 1; - node->tag = 0; - } - } - - vector>height; // {idx, h} -public: - vector> getSkyline(vector>& buildings) - { - setSet; - for (auto & building: buildings) - { - Set.insert(building[0]); - Set.insert(building[1]); - } - int id = 0; - unordered_mappos2idx; - unordered_mapidx2pos; - for (auto x:Set) - { - pos2idx[x] = id; - idx2pos[id] = x; - id++; - } - - int n = pos2idx.size(); - SegTreeNode* root = new SegTreeNode(0, n-1); - init(root, 0, n-1); - - sort(buildings.begin(), buildings.end(), [](vector&a, vector&b){return a[2]>rets; - for (int i=0; istart==node->end || node->tag==1) - { - height.push_back({node->start, node->info}); - return; - } - DFS(node->left); - DFS(node->right); - } - -}; diff --git a/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_segTree.cpp b/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_segTree.cpp deleted file 
mode 100644 index ced18e620..000000000 --- a/Segment_Tree/218.The-Skyline-Problem/218.The-Skyline-Problem_segTree.cpp +++ /dev/null @@ -1,78 +0,0 @@ -class Solution { - class SegTree - { - public: - int start,end,status; - SegTree* left; - SegTree* right; - SegTree(int a, int b, int s):start(a),end(b),status(s),left(NULL),right(NULL){} - - void remove(SegTree* &node) - { - if (node==NULL) return; - remove(node->left); - remove(node->right); - delete node; - node = NULL; - return; - } - - void setStatus(int a, int b, int s) - { - if (a>=end || b<=start) - return; - if (a<=start && b>=end && s>=status) - { - remove(left); - remove(right); - status = s; - return; - } - if (a<=start && b>=end && ssetStatus(a,b,s); - right->setStatus(a,b,s); - status = max(left->status,right->status); - } - }; -public: - vector>results; - vector> getSkyline(vector>& buildings) - { - if (buildings.size()==0) return {}; - - SegTree* root = new SegTree(0,INT_MAX,0); - for (auto q:buildings) - root->setStatus(q[0],q[1],q[2]); - - DFS(root); - if (results.back().second!=0) results.push_back({INT_MAX,0}); - - vector>filteredResults; - for (auto p: results) - { - if (filteredResults.size()!=0 && p.second==filteredResults.back().second) - continue; - filteredResults.push_back({p.first,p.second}); - } - if (filteredResults.size()!=0 && filteredResults[0].second==0) filteredResults.erase(filteredResults.begin()); - return filteredResults; - } - - void DFS(SegTree* node) - { - if (node->left==NULL) - results.push_back({node->start,node->status}); - else - { - DFS(node->left); - DFS(node->right); - } - } -}; diff --git a/Segment_Tree/218.The-Skyline-Problem/Readme.md b/Segment_Tree/218.The-Skyline-Problem/Readme.md index 549e31dac..5787ea49c 100644 --- a/Segment_Tree/218.The-Skyline-Problem/Readme.md +++ b/Segment_Tree/218.The-Skyline-Problem/Readme.md @@ -1,16 +1,12 @@ ### 218.The-Skyline-Problem -#### 解法1:有序容器 +#### 解法1:扫描线 -此题需要设置一个multiSet记录所有的当前下降沿的高度,则*prev(Set.end(),1)就是这个Set里的最大值。 +我们维护一个multiset,按照横轴的位置顺次考虑各个楼的上升沿和下降沿。遇到上升沿就往集合里加入一个H,遇到下降沿就在集合里删除一个H。这样每个时刻,集合里面的最大值,就代表了该位置(及其右边区间)的天际线高度。我们将这些```{位置,高度}```记录下来,就代表了天际线的轮廓。 -首先,将所有的edges放入一个数组,按时间顺序排序,然后顺次遍历考虑:如果是上升沿,则在Set里加入对应高度(即添加一个上升沿);如果是下降沿,则需要在Set里删除对应的高度(即退出当前的下降沿)。 +注意,如果相邻两个位置的高度一样,那么我们可以只保留第一个。 -那何时对results进行更新呢?我们在每次处理edge时,不管是加入上升边沿还是退出下降沿之后,都意味着天际线有可能变动。天际线会变成什么呢?答案是此时Set里的最大值!回想一下,Set里装的是所有当前仍未退出的下降沿,说明他们都在当前可以撑起对应的高度。那么Set里的最大值就是当前天际线的最高值。 - -所以每次查看一个edges,我们都要比较当前的高度(用cur记录)和Set里的最大值进行比较:一旦不同,就用Set里的最大值去加入results,同时也要更新cur。 - -有一个细节需要注意,在生成edges数组时,如果某一个位置同时有上升沿也有下降沿,注意要先考察上升沿,再考察下降沿。也就是要先加入一个上升沿,再退出可能的下降沿。否则类似[[0,2,3],[2,5,3]]的测试例子就会有问题。 +类似的题目有```2158.Amount-of-New-Area-Painted-Each-Day```. 
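回到本题,解法1的流程可以浓缩成下面这段示意代码(与上文描述一一对应,仅为思路示意,省略了完整题解中的部分细节):

```cpp
// 扫描线 + multiset 的示意:按位置归并所有上升沿/下降沿,再从左到右扫描
vector<vector<int>> getSkylineSketch(vector<vector<int>>& buildings) {
    map<int, vector<pair<int,int>>> events;   // 位置 -> {高度, +1(上升)/-1(下降)}
    for (auto& b : buildings) {
        events[b[0]].push_back({b[2],  1});
        events[b[1]].push_back({b[2], -1});
    }
    multiset<int> heights;                    // 当前所有“尚未退出”的楼高
    vector<vector<int>> ret;
    for (auto& [pos, ps] : events) {
        for (auto& [h, flag] : ps) {
            if (flag == 1) heights.insert(h);               // 上升沿:加入高度
            else           heights.erase(heights.find(h));  // 下降沿:删去一个同值高度
        }
        int cur = heights.empty() ? 0 : *heights.rbegin();  // 当前天际线高度
        if (ret.empty() || ret.back()[1] != cur)            // 高度变化才记录关键点
            ret.push_back({pos, cur});
    }
    return ret;
}
```

把同一位置上的所有上升沿、下降沿先处理完,再读取一次最大值,可以避免同一位置上事件处理顺序带来的麻烦。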
#### 解法2:线段树 diff --git a/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups/2286.Booking-Concert-Tickets-in-Groups.cpp b/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups/2286.Booking-Concert-Tickets-in-Groups.cpp new file mode 100644 index 000000000..5d05b6f14 --- /dev/null +++ b/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups/2286.Booking-Concert-Tickets-in-Groups.cpp @@ -0,0 +1,241 @@ +using LL = long long; + +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the max height of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) + { + if (b < start || a > end ) // no intersection + return; + + if (a <= start && end <=b) + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) + { + if (b < start || a > end ) + { + return INT_MIN; // write your own logic + } + if (a <= start && end <=b) + { + return info; // write your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); + return ret; + } + + return info; + } + +}; + + +class SegTreeNode2 +{ + public: + SegTreeNode2* left = NULL; + SegTreeNode2* right = NULL; + int start, end; + LL info; // the sum value over the range + bool lazy_tag; + LL lazy_val; + + SegTreeNode2(int a, int b, int val) // init for range [a,b] with val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode2(a, mid, val); + right = new SegTreeNode2(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (lazy_tag==1 && left) + { + left->info = lazy_val * (left->end - left->start + 1); + right->info = lazy_val * (right->end - right->start + 1); + left->lazy_tag = 1; left->lazy_val = lazy_val; + right->lazy_tag = 1; right->lazy_val = lazy_val; + lazy_tag = 0; lazy_val = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val * (end-start+1); + lazy_tag = 1; + lazy_val = val; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum over range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should 
not reach here + } +}; + +class BookMyShow { + int n,m; + vectorseats; + int p = 0; + SegTreeNode* root; + SegTreeNode2* root2; + + +public: + BookMyShow(int n, int m) { + this->n = n; + this->m = m; + seats.resize(n); + for (int i=0; i gather(int k, int maxRow) + { + int l = 0, r = maxRow; + while (lqueryRange(0, mid) >= k ) + r = mid; + else + l = mid+1; + } + + if (root->queryRange(0, l) < k ) + return {}; + + seats[l] -= k; + root->updateRange(l, l, seats[l]); + root2->updateRange(l, l, seats[l]); + + return {l,m-(seats[l]+k)}; + } + + bool scatter(int k, int maxRow) + { + if (root2->queryRange(0, maxRow) < k) + return false; + + while (k>0) + { + int t = min(k, seats[p]); + seats[p] -= t; + root->updateRange(p, p, seats[p]); + root2->updateRange(p, p, seats[p]); + k -= t; + if (seats[p]==0) p++; + } + + return true; + } +}; + +/** + * Your BookMyShow object will be instantiated and called as such: + * BookMyShow* obj = new BookMyShow(n, m); + * vector param_1 = obj->gather(k,maxRow); + * bool param_2 = obj->scatter(k,maxRow); + */ diff --git a/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups/Readme.md b/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups/Readme.md new file mode 100644 index 000000000..5059c6ae3 --- /dev/null +++ b/Segment_Tree/2286.Booking-Concert-Tickets-in-Groups/Readme.md @@ -0,0 +1,9 @@ +### 2286.Booking-Concert-Tickets-in-Groups + +根据题意,我们需要给维护一个数组seats,代表每一行剩余的座椅数目。 + +如果遇到的是scatter,那么我们就从前往后查看每一行,有任何剩余的座位就都分配出去。如果某一行的座位都已经被scatter分配完了,那么这意味着该行及其之前的行都没有空座了,今后可以忽略。所以我们需要一个指针p,来指向当前仍有作为剩余的最小行号。但是这里有一个问题,如果对于某个scatter query,我们试图一行一行地去查询和分配座位后,发现无法满足要求(所有分配的座位必须在指定的maxRow之前)。于是我们需要有一个函数,支持查看[0,maxRow]这个区间内还剩多少个座位。如果查询得知剩余座位不够,就可以直接返回false。如果查询得知剩余座位足够,我们再逐行更新```seats[p]```,将分配完所有座位的行号都置零。显然,这是区间求和,这需要一个选段树或者BIT的数据结构。 + +如果遇到的是gather,那么我们就需要找到最小的行号i,满足```seats[i]>=k```.那么如何高效地实现这个功能呢?也是用线段树。我们需要一个函数,支持查询[0,row]这个区间内的最大值。我们可以通过二分法,快速查到满足条件的最小的row。那么我们就可以更新```seats[row]-=k```. 
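其中“二分 + 区间最大值”这一步可以单独写成如下示意(假设`root`就是上面维护每行剩余座位最大值的线段树,`queryRange(0,mid)`返回第0到mid行中剩余座位的最大值;仅为示意,与正式代码的写法可能略有出入):

```cpp
// 找到最小的行号 row (row <= maxRow),使得 seats[row] >= k;不存在则返回 -1
int firstRowWithEnoughSeats(SegTreeNode* root, int maxRow, int k) {
    int l = 0, r = maxRow;
    while (l < r) {
        int mid = (l + r) / 2;
        if (root->queryRange(0, mid) >= k)   // 前缀 [0,mid] 内已存在剩余座位 >= k 的行
            r = mid;
        else
            l = mid + 1;
    }
    return root->queryRange(0, l) >= k ? l : -1;
}
```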
+ +所以,我们需要利用两种线段树的模板,分别实现查询rangeSum和rangeMax的功能。当然,也要支持节点数值的动态修改。 diff --git a/Segment_Tree/2407.Longest-Increasing-Subsequence-II/2407.Longest-Increasing-Subsequence-II.cpp b/Segment_Tree/2407.Longest-Increasing-Subsequence-II/2407.Longest-Increasing-Subsequence-II.cpp new file mode 100644 index 000000000..2688003fe --- /dev/null +++ b/Segment_Tree/2407.Longest-Increasing-Subsequence-II/2407.Longest-Increasing-Subsequence-II.cpp @@ -0,0 +1,103 @@ +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +class Solution { +public: + int lengthOfLIS(vector& nums, int k) + { + int x = *max_element(nums.begin(), nums.end()); + SegTreeNode* root = new SegTreeNode(0, x, 0); + + int ret = 0; + + for (auto x: nums) + { + int len = root->queryRange(max(0, x-k), max(0, x-1)); + root->updateRange(x, x , len+1); + ret = max(ret, 1+len); + } + + return ret; + + } +}; diff --git a/Segment_Tree/2407.Longest-Increasing-Subsequence-II/Readme.md b/Segment_Tree/2407.Longest-Increasing-Subsequence-II/Readme.md new file mode 100644 index 000000000..454ff5cd4 --- /dev/null +++ b/Segment_Tree/2407.Longest-Increasing-Subsequence-II/Readme.md @@ -0,0 +1,15 @@ +### 2407.Longest-Increasing-Subsequence-II + +本题思考的切入点其实是DP。对于`nums[i]=x`,我们需要检查i之前的所有位置j,如果有`y=nums[j]`的值满足`[x-k,x-1]`之间,那么就有`dp[i]=max(dp[j]+1)`。显然这是一个N^2的解法,该如何改进呢? + +我们可以发现,在索引上,j的位置是不确定的,但是在值域上y的范围是连续的。我们是否可以将求dp[j]转变为求dp[y]呢? + +对于一个连续的区间的最大值,我们有两个想法:线段树求区间最值,或者双端队列求sliding window max。在本题里,随着i的变化,x不是单调的,所以[x-k,x-1]也不是单向移动的sliding window。所以这道题的解题工具就是线段树。 + +此时我们再观察所有元素的值域范围是0到1e5,这就意味着在内存里开辟这么大的线段树是完全可行的。对于线段树的叶子节点v,我们存储以v为结尾的、符合条件的subsequence有多少。注意,v是指数值,而不是index。 + +所以我们的算法就是反复做两步操作: +1. 对于当前`nums[i]=x`,我们在线段树里寻找[x-k,x-1]里最大值len +2. 
于是我们可以对线段树的节点x更新为1+len。 + +最终答案就是整棵线段树里叶子节点的最大值。 diff --git a/Segment_Tree/2569.Handling-Sum-Queries-After-Update/2569.Handling-Sum-Queries-After-Update.cpp b/Segment_Tree/2569.Handling-Sum-Queries-After-Update/2569.Handling-Sum-Queries-After-Update.cpp new file mode 100644 index 000000000..c0823686b --- /dev/null +++ b/Segment_Tree/2569.Handling-Sum-Queries-After-Update/2569.Handling-Sum-Queries-After-Update.cpp @@ -0,0 +1,130 @@ +using LL = long long; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + bool lazy_tag; // if the child ranges are pending propagation + LL lazy_val; // how many flips needed to be propagated to child ranges. + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (lazy_tag==1 && left) + { + if (lazy_val % 2 == 1) + { + left->info = (left->end - left->start + 1) - left->info; + right->info = (right->end - right->start + 1) - right->info; + left->lazy_tag = 1; left->lazy_val += lazy_val; + right->lazy_tag = 1; right->lazy_val += lazy_val; + } + + lazy_tag = 0; lazy_val = 0; + } + } + + void updateRange(int a, int b) // set range [a,b] with flips + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = (end-start+1) - info; + lazy_tag = 1; + lazy_val += 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b); + right->updateRange(a, b); + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum over range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +class Solution { +public: + vector handleQuery(vector& nums1, vector& nums2, vector>& queries) + { + int n = nums1.size(); + SegTreeNode* root = new SegTreeNode(0, n-1, nums1); + LL sum = accumulate(nums2.begin(), nums2.end(), 0LL); + vectorrets; + for (auto & query: queries) + { + if (query[0]==1) + root->updateRange(query[1], query[2]); + else if (query[0]==2) + sum += root->queryRange(0, n-1) * query[1]; + else + rets.push_back(sum); + } + + return rets; + } +}; diff --git a/Segment_Tree/2569.Handling-Sum-Queries-After-Update/Readme.md b/Segment_Tree/2569.Handling-Sum-Queries-After-Update/Readme.md new file mode 100644 index 000000000..9d5c69bb6 --- /dev/null +++ b/Segment_Tree/2569.Handling-Sum-Queries-After-Update/Readme.md @@ -0,0 +1,9 @@ +### 
2569.Handling-Sum-Queries-After-Update + +本题的本质就是需要高效的区间更新函数来维护nums1,来实现第一类query。对于第二类query,只需要操作`sum += total(nums1)*p`即可,其中total就是对nums1全部元素取和。对于第三类query,就是输出当前的sum。 + +显然我们会用线段树来实现这样的数据结构。 + +在现有的模板中,我们肯定会选择带有“区间求和”功能的模板,即`queryRange(a,b)`可以求得nums1里指定区间[a,b]元素的和。但是“区间更新”的功能需要重写:原本的功能是将区间的数值替换为val,这里的区间更新是将里面的元素全部做0/1翻转,这对区间和会产生什么影响呢?假设原本一段区间[a,b]内记录的元素和是info,那么`updateRange(a,b)`造成的影响其实就是`info = (b-a+1)-info`,改动非常简单。 + +另外,我们还需要对懒标记进行重新的定义。在模板里,`lazy_tag=1`表示该区间的子区间有待更新(即01翻转)。那么`lazy_val`我们需要重新定义为“它的子区间我们还需要翻转多少次”。当我们需要`push_down`的时候,就需要对左右子区间分别进行`lazy_val`次的翻转。 diff --git a/Segment_Tree/2659.Make-Array-Empty/2659.Make-Array-Empty.cpp b/Segment_Tree/2659.Make-Array-Empty/2659.Make-Array-Empty.cpp new file mode 100644 index 000000000..8fea5b15d --- /dev/null +++ b/Segment_Tree/2659.Make-Array-Empty/2659.Make-Array-Empty.cpp @@ -0,0 +1,91 @@ +class BIT{ + public: + int N; + vectorbitArr; // Note: all arrays are 1-index + vectornums; + long long M = 1e9+7; + + void init(int N) + { + this->N = N; + bitArr.resize(N+1); + nums.resize(N+1); + } + + // increase nums[i] by delta + void updateDelta(int i, long long delta) { + int idx = i; + while (idx <= N) + { + bitArr[idx]+=delta; + // bitArr[idx] %= M; + idx+=idx&(-idx); + } + } + + // sum of a range nums[1:j] inclusively + long long queryPreSum(int idx){ + long long result = 0; + while (idx){ + result += bitArr[idx]; + // result %= M; + idx-=idx&(-idx); + } + return result; + } + + // sum of a range nums[i:j] inclusively + long long sumRange(int i, int j) + { + if (i>j) return 0; + return queryPreSum(j)-queryPreSum(i-1); + } +}; + + +class Solution { +public: + long long countOperationsToEmptyArray(vector& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + BIT bit; + bit.init(n+10); + + for (int i=1; i<=n; i++) + { + bit.updateDelta(i, 1); + } + + mapMap; + for (int i=1; i<=n; i++) + Map[nums[i]] = i; + + long long ret = 0; + int last_p = -1; + for (auto& [v, p]: Map) + { + if (last_p==-1) + { + ret += bit.sumRange(1, p-1); + last_p = p; + bit.updateDelta(p, -1); + continue; + } + + if (last_p <= p) + { + ret += bit.sumRange(last_p, p-1); + } + else + { + ret += bit.sumRange(1, p-1); + ret += bit.sumRange(last_p+1, n); + } + last_p = p; + bit.updateDelta(p, -1); + } + + return ret + n; + } +}; diff --git a/Segment_Tree/2659.Make-Array-Empty/Readme.md b/Segment_Tree/2659.Make-Array-Empty/Readme.md new file mode 100644 index 000000000..deab212a3 --- /dev/null +++ b/Segment_Tree/2659.Make-Array-Empty/Readme.md @@ -0,0 +1,11 @@ +2659.Make-Array-Empty + +假设一个序列里面前三个最小的元素是x,y,z,他们在序列中的位置如下:`***x****z****y*****` + +首先我们必然会x,它是第一个会被消除的元素,那么在x之前的元素我们都会挪动到最后。所以操作的次数是x之前的元素的个数。 + +其次我们需要消除y,那么所有在x与y之间的元素都会被挪动到最后。所需要的操作次数也就是x与y之间的元素的个数。 + +接着我们需要消除z。注意在上一步之后,所有在y之前的元素都已经被挪到最后去了。想要消除z,必须先挪动从y+1到z-1之间的元素,其实是一个wrap around的过程。从原始序列上看,因为z的位置在y的前面,那么我们需要挪动的元素其实包含了[y+1,n-1]以及[0:z-1]两部分。特别注意,我们要扣除掉x,因为它已经被消除了。 + +所以这就提示我们可以用线段树或者BIT,支持任意单个元素的删减操作,并可以高效求出任意一段区间内的剩余元素个数。 diff --git a/Segment_Tree/2916.Subarrays-Distinct-Element-Sum-of-Squares-II/2916.Subarrays-Distinct-Element-Sum-of-Squares-II.cpp b/Segment_Tree/2916.Subarrays-Distinct-Element-Sum-of-Squares-II/2916.Subarrays-Distinct-Element-Sum-of-Squares-II.cpp new file mode 100644 index 000000000..fe25d0520 --- /dev/null +++ b/Segment_Tree/2916.Subarrays-Distinct-Element-Sum-of-Squares-II/2916.Subarrays-Distinct-Element-Sum-of-Squares-II.cpp @@ -0,0 +1,145 @@ +using LL = long long; +LL M = 1e9+7; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int 
start, end; + LL info; // the sum value over the range + LL delta; + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info += delta * (left->end - left->start + 1); + left->delta += delta; + right->info += delta * (right->end - right->start + 1); + right->delta += delta; + left->tag = 1; + right->tag = 1; + tag = 0; + delta = 0; + } + } + + void updateRangeBy(int a, int b, int val) // increase range [a,b] by val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info += val * (end-start+1); + delta += val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRangeBy(a, b, val+delta); + right->updateRangeBy(a, b, val+delta); + delta = 0; + tag = 0; + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum within range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +class Solution { +public: + int sumCounts(vector& nums) + { + unordered_mapMap; + int n = nums.size(); + vectorprev(n, -1); + for (int i=0; idp(n); + for (int i=0; iqueryRange(j+1, i-1) + i-1-j; + dp[i] += 1; + dp[i] %= M; + root->updateRangeBy(j+1, i, 1); + } + + LL ret = 0; + for (int i=0; ii时,区间实际不存在,故标记0. + +我们再定义`count[a:b]`表示某个区间内的distinct number的数目,`square[a:b]`表示该区间内distinct number的数目的平方。 + +对于位置i,我们在数组里找到相同nums[i]出现的前一个位置k。于是 +1. 对于`j=0,1,...,k`而言,`square[j:i] = square[j:i-1]`. +2. 对于`j=k+1,k+2,...,i-1`而言,`count[j:i] = count[j:i-1]+1,两边平方一下就得到`square[j:i] = square[j:i-1] + 2*count[j:i-1] + 1`. +将两部分相加得到 +``` +sum{square[j:i]} = sum{square[j:i-1]} (for j=0,1,2,...i-1) + 2 * sum{count[j:i-1]} + (i-1-k) (for j=k+1,...i-1) +``` +可见`sum{square[j:i]}`与`sum{square[j:i-1]}`之间存在递推关系。其中相差的部分`sum{count[j:i-1]}`就是之前定义的线段树中在t=i-1时刻,叶子节点区间[k+1, i-1]的元素之和。 + +当我们求出`sum{square[j:i]}`之后,该如何更新这棵线段树呢?显然,只有以k+1,k+2,...i-1开头的这些区间,随着i的加入,distinct number会增1. 
所以我们只需要将叶子节点区间[k+1, i-1]的元素统一都增加1即可。 + +最终的答案就是将每个位置i的`sum{square[j:i]}`再加起来。 + diff --git a/Segment_Tree/307.Range-Sum-Query-Mutable/307.Range-Sum-Query-Mutable_BIT.cpp b/Segment_Tree/307.Range-Sum-Query-Mutable/307.Range-Sum-Query-Mutable_BIT.cpp index e738ea721..db7a5ddb3 100644 --- a/Segment_Tree/307.Range-Sum-Query-Mutable/307.Range-Sum-Query-Mutable_BIT.cpp +++ b/Segment_Tree/307.Range-Sum-Query-Mutable/307.Range-Sum-Query-Mutable_BIT.cpp @@ -1,41 +1,76 @@ -class NumArray { -public: - vectorbitArr; - vectornums; +class BIT{ + public: + int N; + vectorbitArr; // Note: all arrays are 1-index + vectornums; + long long M = 1e9+7; - NumArray(vector& nums) { - this->nums = nums; - bitArr.resize(nums.size()+1); - for (int i=0; iN = N; + bitArr.resize(N+1); + nums.resize(N+1); } - void update(int i, int val){ - my_update(i, val-nums[i]); - nums[i] = val; - } - - void my_update(int i, int delta) { - int idx = i+1; - while (idxnums; +public: + NumArray(vector& nums) + { + this->nums = nums; + int n = nums.size(); + bit.init(n+10); + + for (int i=0; iupdate(index,val); + * int param_2 = obj->sumRange(left,right); + */ diff --git a/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/3072.Distribute-Elements-Into-Two-Arrays-II.cpp b/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/3072.Distribute-Elements-Into-Two-Arrays-II.cpp new file mode 100644 index 000000000..645189786 --- /dev/null +++ b/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/3072.Distribute-Elements-Into-Two-Arrays-II.cpp @@ -0,0 +1,171 @@ +using LL = long long; +LL M = 1e9+7; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + LL delta; + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info += delta * (left->end - left->start + 1); + left->delta += delta; + right->info += delta * (right->end - right->start + 1); + right->delta += delta; + left->tag = 1; + right->tag = 1; + tag = 0; + delta = 0; + } + } + + void updateRangeBy(int a, int b, int val) // increase range [a,b] by val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info += val * (end-start+1); + delta += val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRangeBy(a, b, val+delta); + right->updateRangeBy(a, b, val+delta); + delta = 0; + tag = 0; + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum within range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL 
ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +class Solution { +public: + vector resultArray(vector& nums) + { + setSet(nums.begin(), nums.end()); + int idx = 0; + unordered_mapMap; + for (int x: Set) + { + Map[x] = idx; + idx++; + } + int n = Set.size(); + + SegTreeNode* root1 = new SegTreeNode(0, n-1, 0); + int k = Map[nums[0]]; + root1->updateRangeBy(k, k , 1); + + SegTreeNode* root2 = new SegTreeNode(0, n-1, 0); + k = Map[nums[1]]; + root2->updateRangeBy(k, k , 1); + + vectorarr1({nums[0]}); + vectorarr2({nums[1]}); + for (int i=2; iqueryRange(k+1, n-1); + int y = root2->queryRange(k+1, n-1); + if (x>y) + { + arr1.push_back(nums[i]); + root1->updateRangeBy(k, k, 1); + } + else if (xupdateRangeBy(k, k, 1); + } + else + { + if (arr1.size() <= arr2.size()) + { + arr1.push_back(nums[i]); + root1->updateRangeBy(k, k, 1); + } + else + { + arr2.push_back(nums[i]); + root2->updateRangeBy(k, k, 1); + } + } + } + + vectorrets; + for (int x: arr1) rets.push_back(x); + for (int x: arr2) rets.push_back(x); + return rets; + } +}; diff --git a/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/3072.Distribute-Elements-Into-Two-Arrays-II.py b/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/3072.Distribute-Elements-Into-Two-Arrays-II.py new file mode 100644 index 000000000..b41d13c5d --- /dev/null +++ b/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/3072.Distribute-Elements-Into-Two-Arrays-II.py @@ -0,0 +1,21 @@ +from sortedcontainers import SortedList +class Solution: + def resultArray(self, nums: List[int]) -> List[int]: + s1 = SortedList([nums[0]]) + s2 = SortedList([nums[1]]) + arr1 = [nums[0]] + arr2 = [nums[1]] + + for i in range(2, len(nums)): + x = len(s1)-s1.bisect_right(nums[i]) + y = len(s2)-s2.bisect_right(nums[i]) + if (x>y) or (x==y and len(arr1)<=len(arr2)): + arr1.append(nums[i]) + s1.add(nums[i]) + else: + arr2.append(nums[i]) + s2.add(nums[i]) + + return arr1+arr2 + + diff --git a/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/Readme.md b/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/Readme.md new file mode 100644 index 000000000..ced5d5bb5 --- /dev/null +++ b/Segment_Tree/3072.Distribute-Elements-Into-Two-Arrays-II/Readme.md @@ -0,0 +1,5 @@ +### 3072.Distribute-Elements-Into-Two-Arrays-II + +此题如果用python的SortedList来做的话,秒杀。 + +用C++的话,得用线段树或者树状数组。将nums里面的元素按照从小到大离散化,按照数值从小到大映射成编号(即第x号元素)。建立两棵线段树,叶子节点的容量与编号的上限相同(记做M),叶子节点的初始数值都是零。然后就模拟题意,对于nums[i],我们得到它对应的编号k,那么我们就分别查询两棵线段树里[k+1,M-1]范围内叶子节点之和,即为`greaterCount(arr, nums[i])`. 然后对应需要插入的那棵线段树,将第i个叶子节点增1. 
不断模拟,由此得到最终的分配方案。 diff --git a/Segment_Tree/3161.Block-Placement-Queries/3161.Block-Placement-Queries.cpp b/Segment_Tree/3161.Block-Placement-Queries/3161.Block-Placement-Queries.cpp new file mode 100644 index 000000000..888d5c421 --- /dev/null +++ b/Segment_Tree/3161.Block-Placement-Queries/3161.Block-Placement-Queries.cpp @@ -0,0 +1,149 @@ +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + info = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN/2; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + + +class Solution { +public: + vector getResults(vector>& queries) + { + int n = min(50000, (int)queries.size()*3) + 5; + SegTreeNode* root = new SegTreeNode(0, n, 0); + + setSet; + Set.insert(0); + + vectorrets; + + for (auto q:queries) + { + if (q[0]==1) + { + int x = q[1]; + Set.insert(x); + auto iter = Set.find(x); + int a = *prev(iter); + root->updateRange(x,x,x-a); + + if (next(iter)!=Set.end()) + { + int b = *next(iter); + root->updateRange(b,b,b-x); + } + } + else + { + int x = q[1], sz = q[2]; + int len = root->queryRange(0, x); + + if (Set.find(x)==Set.end()) + { + auto iter = Set.lower_bound(x); + int a = *prev(iter); + len = max(len, x-a); + } + rets.push_back(len >= sz); + } + } + + return rets; + } +}; diff --git a/Segment_Tree/3161.Block-Placement-Queries/Readme.md b/Segment_Tree/3161.Block-Placement-Queries/Readme.md new file mode 100644 index 000000000..3f9e3eb41 --- /dev/null +++ b/Segment_Tree/3161.Block-Placement-Queries/Readme.md @@ -0,0 +1,9 @@ +### 3161.Block-Placement-Queries + +本题的第一类query会在数轴上不断插入block,通常情况下每插入一个block,就会隔出两个区间。当遇到第二类query时,我们只需要在[0,x]范围内查看这些区间,找出最大的区间长度,再与sz比较即可。此时我们只需要把每个区间的长度,作为该区间右端点的一个属性,就可以发现就是在[0,x]里求最大值。所以我们容易想到,只需要构造一棵线段树,其中queryRange是求任意一段区间内的最大值。初始状态每个点的值是0. 
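举个小例子帮助理解:依次在`x=2`、`x=7`处放置block后,线段树在位置2处记录长度2(对应区间[0,2]),在位置7处记录长度5(对应区间[2,7]);此后处理`query(7, 4)`时,只需在[0,7]上取最大值得到5,因为5>=4,返回true。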
+ +更具体的,对于第一类操作,我们在x处插入一个block时,找到它之前已经存在的block位置记做a,之后已经存在的block位置记做b,那么我们只需要对线段树进行单点更新:在x处更新属性x-a,在b处更新数值b-x(前提是b存在)。 + +对于第二类操作,我们只需要在线段树的[0,x]范围里找最大值。特别注意,如果x处本身并没有block,我们还需要考察x之前的那个block(记做a)到x这段空间长度。 + +最后,对于一个x,如果找到它之前和之后的block呢?只需要维护一个有序容器set即可,不断将x插入其中,并且用lower_bound和upper_bound找到其在Set里的前后元素。 diff --git a/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements.cpp b/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements.cpp new file mode 100644 index 000000000..d0d504a54 --- /dev/null +++ b/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements.cpp @@ -0,0 +1,66 @@ +using LL = long long; +LL M = 1e9+7; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info00, info11, info10, info01; + + SegTreeNode(int a, int b, vector& vals) // init for range [a,b] with val + { + start = a, end = b; + if (a==b) + { + info11 = vals[start], info01 = -1e18, info10 = -1e18, info00 = 0; + return; + } + int mid = (a+b)/2; + + left = new SegTreeNode(a, mid, vals); + right = new SegTreeNode(mid+1, b, vals); + info11 = max({left->info10 + right->info01, left->info11 + right->info01, left->info10 + right->info11}); + info00 = max({left->info00 + right->info00, left->info01 + right->info00, left->info00 + right->info10}); + info10 = max({left->info10 + right->info00, left->info10 + right->info10, left->info11 + right->info00}); + info01 = max({left->info00 + right->info01, left->info01 + right->info01, left->info00 + right->info11}); + } + + void updateRange(int a, int val) // set range [a,b] with val + { + if (a < start || a > end ) // not covered by [a,b] at all + return; + if (start==end) // completely covered within [a,b] + { + info00 = 0; + info11 = val; + return; + } + + left->updateRange(a, val); + right->updateRange(a, val); + info11 = max({left->info10 + right->info01, left->info11 + right->info01, left->info10 + right->info11}); + info00 = max({left->info00 + right->info00, left->info01 + right->info00, left->info00 + right->info10}); + info10 = max({left->info10 + right->info00, left->info10 + right->info10, left->info11 + right->info00}); + info01 = max({left->info00 + right->info01, left->info01 + right->info01, left->info00 + right->info11}); + } +}; + + +class Solution { +public: + int maximumSumSubsequence(vector& nums, vector>& queries) + { + int n = nums.size(); + SegTreeNode* root = new SegTreeNode(0, n-1, nums); + + LL ret = 0; + for (auto q: queries) + { + root->updateRange(q[0], q[1]); + ret += max({root->info00,root->info01,root->info10,root->info11}); + ret%=M; + } + return ret; + } +}; diff --git a/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements/Readme.md b/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements/Readme.md new file mode 100644 index 000000000..1f35afc4b --- /dev/null +++ b/Segment_Tree/3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements/Readme.md @@ -0,0 +1,70 @@ +### 3165.Maximum-Sum-of-Subsequence-With-Non-adjacent-Elements + +本题的基础是经典的house robber,但是如果从house robber的常规解法去思考,那么本题是做不下去的。事实上,house robber有另一种适合拓展的做法,即递归分治。 + +我们令dp00[i][j]表示[i:j]区间内的最大收益,并且要求左右端点都不取到。同理,定义dp11[i][j]为区间[i:j]两个端点都取到时的最大收益,定义dp10[i][j]为区间[i:j]仅左端点都取到时的最大收益,定义dp01[i][j]为区间[i:j]仅右端点都取到时的最大收益。 + 
+我们不难发现,对于[i:j]内的任意一个点k,我们可以将[i:j]的收益分为[i:k]和[k+1:j]两端区间的收益之和。更具体地,要满足“不同时取相邻节点”的原则,针对分割处取或不取的决策,我们可以有对dp00[i][j]的三种分解 +``` +dp00[i][j] = max{dp00[i][k]+dp00[k+1][j], dp01[i][k]+dp00[k+1][j], dp00[i][k]+dp10[k+1][j],} +``` +同理,我们可以写出dp01,dp10,dp11的分解。 + +至此我们可以发现这是一个可以自上而下分治解决的问题。边界条件就是`dp00[i][i]=0, dp11[i][i]=1, dp01[i][i]=dp10[i][i]=-inf`. + +写成这样分治的结构,我们明显可以搞成线段树,它有两个好处: +1. 线段树可以用log(n)的时间求任意一段区间内的最大收益。 +2. 对于任何单点的变动后,我们依然可以用log(n)的时间更新整棵线段树。 + +注意,这样的线段树,懒标记是不能用的。每次单点的变动,必须将更新传播到最底层,再把区间最大收益反向传播上去。 + +本题的线段树不需要queryRange方法,最终只需要返回`root->info00,root->info10,root->info01,root->info11`的最大值即可。 + +数据结构如下: +```cpp +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info00, info11, info10, info01; + + SegTreeNode(int a, int b, vector& vals) // init for range [a,b] with val + { + start = a, end = b; + if (a==b) + { + info11 = vals[start], info01 = -1e18, info10 = -1e18, info00 = 0; + return; + } + int mid = (a+b)/2; + + left = new SegTreeNode(a, mid, vals); + right = new SegTreeNode(mid+1, b, vals); + info11 = max({left->info10 + right->info01, left->info11 + right->info01, left->info10 + right->info11}); + info00 = max({left->info00 + right->info00, left->info01 + right->info00, left->info00 + right->info10}); + info10 = max({left->info10 + right->info00, left->info10 + right->info10, left->info11 + right->info00}); + info01 = max({left->info00 + right->info01, left->info01 + right->info01, left->info00 + right->info11}); + } + + void updateRange(int a, int val) // set range [a,b] with val + { + if (a < start || a > end ) // not covered by [a,b] at all + return; + if (start==end) // completely covered within [a,b] + { + info00 = 0; + info11 = val; + return; + } + + left->updateRange(a, val); + right->updateRange(a, val); + info11 = max({left->info10 + right->info01, left->info11 + right->info01, left->info10 + right->info11}); + info00 = max({left->info00 + right->info00, left->info01 + right->info00, left->info00 + right->info10}); + info10 = max({left->info10 + right->info00, left->info10 + right->info10, left->info11 + right->info00}); + info01 = max({left->info00 + right->info01, left->info01 + right->info01, left->info00 + right->info11}); + } +}; +``` diff --git a/Segment_Tree/3187.Peaks-in-Array/3187.Peaks-in-Array.cpp b/Segment_Tree/3187.Peaks-in-Array/3187.Peaks-in-Array.cpp new file mode 100644 index 000000000..a3125ea3a --- /dev/null +++ b/Segment_Tree/3187.Peaks-in-Array/3187.Peaks-in-Array.cpp @@ -0,0 +1,148 @@ +using LL = long long; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + bool lazy_tag; + LL lazy_val; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (lazy_tag==1 && left) + { 
+ left->info = lazy_val * (left->end - left->start + 1); + right->info = lazy_val * (right->end - right->start + 1); + left->lazy_tag = 1; left->lazy_val = lazy_val; + right->lazy_tag = 1; right->lazy_val = lazy_val; + lazy_tag = 0; lazy_val = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val * (end-start+1); + lazy_tag = 1; + lazy_val = val; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum over range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +class Solution { +public: + vector countOfPeaks(vector& nums, vector>& queries) + { + int n = nums.size(); + vectorpeaks(n, 0); + for (int i=1; inums[i-1] && nums[i]>nums[i+1]) + peaks[i] = 1; + } + + SegTreeNode* root = new SegTreeNode(0, n-1, peaks); + + vectorrets; + for (auto query: queries) + { + if (query[0]==1) + { + int a = query[1], b = query[2]; + rets.push_back(root->queryRange(a+1, b-1)); + } + else + { + int i = query[1]; + nums[i] = query[2]; + if (i>=1 && i=1 && i-1=1 && i+1&nums, vector&peaks) + { + int v = nums[i]>nums[i-1] && nums[i]>nums[i+1]; + if (v==peaks[i]) return; + peaks[i] = v; + root->updateRange(i,i,v); + } +}; diff --git a/Segment_Tree/3187.Peaks-in-Array/Readme.md b/Segment_Tree/3187.Peaks-in-Array/Readme.md new file mode 100644 index 000000000..78d93ad47 --- /dev/null +++ b/Segment_Tree/3187.Peaks-in-Array/Readme.md @@ -0,0 +1,5 @@ +### 3187.Peaks-in-Array + +高效地求任意一段区间内的peak的数目,显然是用线段树实现。我们构造一棵线段树,叶子节点是一个binary value,表示该元素是否是peak。 + +当我们修改nums[i]时,可能会对i-1,i,i+1三处位置的peak状态产生影响。所以我们需要分别进行考察,相应地修改线段树节点的值。 diff --git a/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II/3261.Count-Substrings-That-Satisfy-K-Constraint-II.cpp b/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II/3261.Count-Substrings-That-Satisfy-K-Constraint-II.cpp new file mode 100644 index 000000000..6a6521eeb --- /dev/null +++ b/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II/3261.Count-Substrings-That-Satisfy-K-Constraint-II.cpp @@ -0,0 +1,156 @@ +using LL = long long; +LL M = 1e9+7; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + LL delta; + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new 
SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info += delta * (left->end - left->start + 1); + left->delta += delta; + right->info += delta * (right->end - right->start + 1); + right->delta += delta; + left->tag = 1; + right->tag = 1; + tag = 0; + delta = 0; + } + } + + void updateRangeBy(int a, int b, int val) // increase range [a,b] by val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info += val * (end-start+1); + delta += val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRangeBy(a, b, val+delta); + right->updateRangeBy(a, b, val+delta); + delta = 0; + tag = 0; + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum within range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + + +class Solution { +public: + vector countKConstraintSubstrings(string s, int k, vector>& queries) + { + int n = s.size(); + vectorend(n); + int j = 0; + int count0=0, count1=0; + for (int i=0; irets(queries.size()); + SegTreeNode* root = new SegTreeNode(0, n-1, 0); + + int i = n-1; + for (auto q: queries) + { + int a = q[0], b = q[1], idx=q[2]; + while (i>=a) + { + root->updateRangeBy(i, end[i], 1); + i--; + } + rets[idx] = root->queryRange(a,b); + } + + return rets; + } +}; diff --git a/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II/Readme.md b/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II/Readme.md new file mode 100644 index 000000000..fa95f795d --- /dev/null +++ b/Segment_Tree/3261.Count-Substrings-That-Satisfy-K-Constraint-II/Readme.md @@ -0,0 +1,7 @@ +### 3261.Count-Substrings-That-Satisfy-K-Constraint-II + +假设以i作为左边界,那么我们容易得到最远的右边界j,使得[i:j]是最长的valid substring;并且以其中的任何一点作为右边界,都是valid substring. 然后发现,如果将左边界i往右移动一位,那么最远的右边界位置j必然是单调向右移动的。所以我们可以用双指针的方法,求得所有的len[i],表示以i作为左边界,那么我们得到最远的valid substring的右边界。 + +那么对于一个query而言,如何求[l,r]内所有的valid substring呢?考虑到Q的数量很大,我们很容易想到线段树,希望能用log的时间来解决一个query(计算一个区间内的substring之和)。但是一个valid sbustring是需要用两个端点来表示的,这似乎和线段树的应用有些不同。于是我们想到,能否在线段树里用一个点来表示一个substring?于是我们可以尝试将所有的valid substring只用其右端点来表示。举个例子,对于以i为左端点的valid subtring,根据其右端点的位置,我们在i,i+1,i+2,...,len[i]都可以记录+1;也就是说,在线段树上我们可以对[i,len[i]]这段区间整体都加1. 
此外,线段树就可以很方便地query所有右端点落入[l,r]内valid substring的个数。 + +但是这里就有个问题,[l,r]内的valid substring,并不等同于右端点落入[l,r]内valid substring。对于后者而言,有些substring的左端点在l的左边,这是需要排除掉的。我们该如何去掉那些左端点在l左边的那些substring呢?在线段树的操作里,似乎并没有什么好办法,但是有一个巧妙的策略:那就是不把“那些左端点位于l左边的那些substring”收录进线段树。这就提示我们可以将queries按照左端点倒序排列依次处理。对于[l,r]这个query而言,我们(只)收录进所有左端点大于等于l的那些substring。此时在线段树内查询[l,r]的区间和,就代表了所有右端点在[l,r]范围内的valid substring,同时这些string的左端点都不会小于l。 diff --git a/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II.cpp b/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II.cpp new file mode 100644 index 000000000..35f00922a --- /dev/null +++ b/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II.cpp @@ -0,0 +1,88 @@ +using ll = long long; + +class BIT{ + public: + int N; + vectorbitArr; // Note: all arrays are 1-index + vectornums; + long long M = 1e9+7; + + void init(int N) + { + this->N = N; + bitArr.resize(N+1); + nums.resize(N+1); + } + + // increase nums[i] by delta + void updateDelta(int i, long long delta) { + int idx = i; + while (idx <= N) + { + bitArr[idx]+=delta; + // bitArr[idx] %= M; + idx+=idx&(-idx); + } + } + + // sum of a range nums[1:j] inclusively + long long queryPreSum(int idx){ + long long result = 0; + while (idx){ + result += bitArr[idx]; + // result %= M; + idx-=idx&(-idx); + } + return result; + } + + // sum of a range nums[i:j] inclusively + long long sumRange(int i, int j) { + return queryPreSum(j)-queryPreSum(i-1); + } +}; + +class Solution { +public: + int getDepth(ll x) { + int count = 0; + while (x!=1ll) { + x = __builtin_popcountll(x); + count++; + } + return count; + } + + vector popcountDepth(vector& nums, vector>& queries) { + int N = nums.size()+1; + vector bit_arr (5); + for (int i=0; i<=4; i++) + bit_arr[i].init(N); + + for (int i=0; irets; + for (auto&q: queries) { + if (q[0]==2) { + ll idx = q[1], val = q[2]; + int depth0 = getDepth(nums[idx]); + int depth1 = getDepth(val); + bit_arr[depth0].updateDelta(idx+1, -1); + bit_arr[depth1].updateDelta(idx+1, 1); + nums[idx] = val; + } else { + int l = q[1], r = q[2], k = q[3]; + if (k>=5) + rets.push_back(0); + else + rets.push_back(bit_arr[k].sumRange(l+1,r+1)); + } + } + + return rets; + + } +}; diff --git a/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II/Readme.md b/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II/Readme.md new file mode 100644 index 000000000..338adc20e --- /dev/null +++ b/Segment_Tree/3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II/Readme.md @@ -0,0 +1,10 @@ +### 3624.Number-of-Integers-With-Popcount-Depth-Equal-to-K-II + +首先我们要意识到,任何一个长整型的popcount depth不可能很大。 + +首先对于<=64的数,我们可以使用穷举的方法,发现最坏情况就是63,它的路径就是63->6->2->1。而剩下任意大于64的长整型(共有64个bit),它的第一步变化之后必然落入[1,64]之间。由之前的结论,其路径最多再走3步就会到1。所以结论是:任意一个长整型,其popcount depth不超过4. 
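As a sanity check on the depth bound argued above, here is a small self-contained sketch (the helper name is illustrative, not part of the repo): every value in [1,64] reaches 1 within 3 popcount steps, so any 64-bit integer needs at most 4.
```cpp
#include <cassert>

// Illustrative helper: number of popcount steps needed to reduce x to 1.
int popcountDepth(unsigned long long x) {
    int depth = 0;
    while (x != 1ULL) {
        x = __builtin_popcountll(x);  // result is at most 64 for a 64-bit value
        ++depth;
    }
    return depth;
}

int main() {
    // Worst case within [1,64] is 63 -> 6 -> 2 -> 1, i.e. 3 steps; any larger
    // 64-bit value drops into [1,64] after one step, hence depth <= 4 overall.
    for (unsigned long long x = 1; x <= 64; ++x)
        assert(popcountDepth(x) <= 3);
    return 0;
}
```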
+ +既然处理任何一个数只需要4步就能求出深度,那我们可以将所有nums都先处理一遍,将所有nums可以按照depth分类。于是对于每一种深度,我们可以知道有哪些数对应,将其index标记为1(否则标记为0),这样用树状数组就可以高效(log的时间)地算出任意区间内有多少数对应该深度(即求区间和),满足了第一类query的要求。同时树状数组支持对单点的动态修改,支持了第二类query的操作。 + +因为深度的种类只有5种,我们只需要开5个这样的树状数组,因此空间复杂度也是符合要求的。 + diff --git a/Segment_Tree/3671.Sum-of-Beautiful-Subsequences/3671.Sum-of-Beautiful-Subsequences.cpp b/Segment_Tree/3671.Sum-of-Beautiful-Subsequences/3671.Sum-of-Beautiful-Subsequences.cpp new file mode 100644 index 000000000..e3a9edca2 --- /dev/null +++ b/Segment_Tree/3671.Sum-of-Beautiful-Subsequences/3671.Sum-of-Beautiful-Subsequences.cpp @@ -0,0 +1,91 @@ +class BIT{ + public: + int N; + vectorbitArr; // Note: all arrays are 1-index + vectornums; + long long M = 1e9+7; + + void init(int N) + { + this->N = N; + bitArr.resize(N+1); + nums.resize(N+1); + } + + // increase nums[i] by delta + void updateDelta(int i, long long delta) { + int idx = i; + while (idx <= N) + { + bitArr[idx]+=delta; + bitArr[idx] %= M; + idx+=idx&(-idx); + } + } + + // sum of a range nums[1:j] inclusively + long long queryPreSum(int idx){ + long long result = 0; + while (idx){ + result += bitArr[idx]; + // result %= M; + idx-=idx&(-idx); + } + return result; + } + + // sum of a range nums[i:j] inclusively + long long sumRange(int i, int j) { + return queryPreSum(j)-queryPreSum(i-1); + } +}; + +long long MOD = 1e9+7; + +class Solution { +public: + int totalBeauty(vector& nums) { + int mx = *max_element(begin(nums), end(nums)); + vector>pos(mx+1); + for (int i=0; iseq(mx+1); + for (int g=1; g<=mx; g++) { + mapmp; + for (int x: pos[g]) mp[nums[x]] = 1; + int idx = 0; + for (auto& [k,v]:mp) v = ++idx; + + vectorarr; + for (int x: pos[g]) arr.push_back(mp[nums[x]]); + + BIT bit; + bit.init(arr.size()); + for (int x: arr) { + long long ans = bit.queryPreSum(x-1) + 1; + seq[g] = (seq[g]+ans)%MOD; + bit.updateDelta(x, ans); + } + } + + vectorret(mx+1); + for (int g=mx; g>=1; g--) { + ret[g] = seq[g]; + for (int j=g*2; j<=mx; j+=g) + ret[g] = (ret[g]-ret[j]+MOD) % MOD; + } + + long long ans = 0; + for (int g=mx; g>=1; g--) { + ans = (ans + g*ret[g]) % MOD; + } + return ans; + } +}; diff --git a/Segment_Tree/3671.Sum-of-Beautiful-Subsequences/Readme.md b/Segment_Tree/3671.Sum-of-Beautiful-Subsequences/Readme.md new file mode 100644 index 000000000..8fe0eea1f --- /dev/null +++ b/Segment_Tree/3671.Sum-of-Beautiful-Subsequences/Readme.md @@ -0,0 +1,12 @@ +### 3671.Sum-of-Beautiful-Subsequences + +此题包含了两个知识点。我们拆开来分析。 + +第一个问题,对于任意的正整数g,如何计算在数组里有多少个strictly increasing subsequence并且序列中每个元素都是“g的倍数”。假设我们已经通过预处理,知道数组里g的倍数位于[i1,i2,...,ik]的位置上。注意我们只需要构造严格递增的序列,所以我们需要只知道它们之间的大小关系和位置关系,但是不需要知道绝对大小和绝对位置,故我们可以离散化,转化为类似[1,3,5,2,4]这样的形式,表示数组里有5个数是g的倍数,且它们的相对位置和相对大小都可以用这样的形式表示出来。于是我们就构造出了这样一个问题:里面有多少个严格递增的序列? + +这是一个经典问题,我们可以用树状数组来解。我们想象一个长度为5的空数组,需要依照上述次序涂黑。首先我们在填写1时,以它结尾的递增序列只有一个,记作f(1)。然后我们填写3时,以它结尾的递增序列就是`f(3)=f(1)+1`,表示以1结尾的递增序列再附加上3,或者单独的3也可以构成一个符合条件的序列。然后我们填写5时,以它为结尾的递增序列就是`f(5)=f(1)+f(3)+1`。然后我们填写2时,以它结尾的递增序列就是`f(2)=f(1)+1`...由此我们可以看出f(x)就是BIT数组前缀和`f(1)+f(2)+...+f(x-1)`再加1.于是我们就可以求出所有的f(x)并且求和,就得出:数组里有多少个严格递增序列、并且每个元素都是“g的倍数”,我们记作p(g). + +在有了p(g)的基础上,我们如何进一步求出数组里有多少个严格递增序列、并且所有元素的GCD恰好是g呢?我们记作这样的解是ret(g),其实就是删去在p(g)里去除“2以上g的倍数”的情况,即`ret(g) = p(g)-ret(2g)-ret(3g)-...-ret(kg)`. 所以我们只需要对g按从大到小的顺序求解q:当解到ret(g)时,p(g)和`ret(2g)...ret(kg)`就都是已知的了。初始条件是对于数组里的最大元素mx,`ret(mx) = 1`. 
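The counting step described above (f(x) = 1 + prefix sum of the earlier f values) can be sketched on its own as follows; it assumes the values of one residue class have already been compressed to ranks 1..m, and the modulus and names are illustrative rather than taken from the solution file.
```cpp
#include <vector>
using namespace std;

// Count strictly increasing subsequences of `ranks` (values compressed to 1..m)
// with a Fenwick tree: f(x) = 1 + sum of f(y) over already-placed y < x.
long long countIncreasingSubsequences(const vector<int>& ranks, int m) {
    const long long MOD = 1e9 + 7;
    vector<long long> bit(m + 1, 0);
    auto update = [&](int i, long long delta) {
        for (; i <= m; i += i & (-i)) bit[i] = (bit[i] + delta) % MOD;
    };
    auto query = [&](int i) {                    // prefix sum f(1)+...+f(i)
        long long s = 0;
        for (; i > 0; i -= i & (-i)) s = (s + bit[i]) % MOD;
        return s;
    };
    long long total = 0;
    for (int x : ranks) {
        long long f = (query(x - 1) + 1) % MOD;  // subsequences ending here
        total = (total + f) % MOD;
        update(x, f);
    }
    return total;                                // this is p(g) for one class
}
```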
+ +最终返回`g*ret(g)`对于g=1,2,..,mx的之和 + diff --git a/Segment_Tree/370.Range-Addition/370.Range-Addition.cpp b/Segment_Tree/370.Range-Addition/370.Range-Addition_DiffArray.cpp similarity index 100% rename from Segment_Tree/370.Range-Addition/370.Range-Addition.cpp rename to Segment_Tree/370.Range-Addition/370.Range-Addition_DiffArray.cpp diff --git a/Segment_Tree/370.Range-Addition/370.Range-Addition_SegTree.cpp b/Segment_Tree/370.Range-Addition/370.Range-Addition_SegTree.cpp new file mode 100644 index 000000000..aedfcc976 --- /dev/null +++ b/Segment_Tree/370.Range-Addition/370.Range-Addition_SegTree.cpp @@ -0,0 +1,107 @@ +using LL = long long; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + LL delta; + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->delta += delta; + right->delta += delta; + left->tag = 1; + right->tag = 1; + tag = 0; + delta = 0; + } + } + + void updateRangeBy(int a, int b, int val) // increase range [a,b] by val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + delta += val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRangeBy(a, b, val); + right->updateRangeBy(a, b, val); + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info + delta*(end-start+1); // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +class Solution { +public: + vector getModifiedArray(int length, vector>& updates) + { + SegTreeNode* root = new SegTreeNode(0, length-1, 0); + + for (auto update: updates) + { + root->updateRangeBy(update[0], update[1], update[2]); + } + + vectorrets(length); + for (int i=0; iqueryRange(i, i); + } + + return rets; + } +}; + diff --git a/Segment_Tree/370.Range-Addition/370.Range-Addition_SegmentTree_lazyTag.cpp b/Segment_Tree/370.Range-Addition/370.Range-Addition_SegmentTree_lazyTag.cpp deleted file mode 100644 index d26b0c13d..000000000 --- a/Segment_Tree/370.Range-Addition/370.Range-Addition_SegmentTree_lazyTag.cpp +++ /dev/null @@ -1,101 +0,0 @@ -class Solution { - class SegTreeNode - { - public: - SegTreeNode* left; - SegTreeNode* right; - int start, end; - int info; - int tag; - SegTreeNode(int a, int b):start(a),end(b),info(0),tag(0),left(NULL),right(NULL){} - }; - - void init(SegTreeNode* node, int a, int b) // init for range [a,b] - { - if (a==b) - { - node->info = 0; - return; - } - int mid = (a+b)/2; - if (node->left==NULL) - { - node->left = new SegTreeNode(a, mid); - node->right = new SegTreeNode(mid+1, b); - } - init(node->left, a, mid); - init(node->right, mid+1, b); - - node->info = 0; // write your own logic - } - - void updateRange(SegTreeNode* 
node, int a, int b, int val) - { - if (b < node->start || a > node->end ) - return; - if (a <= node->start && b>=node->end) - { - node->info += val * (node->end-node->start+1); - node->tag += val; - return; - } - - pushdown(node); // erase lazy tag and propagate down - updateRange(node->left, a, b, val); - updateRange(node->right, a, b, val); - } - - void pushdown(SegTreeNode* node) - { - if (node->tag != 0) - { - node->left->tag += node->tag; - node->right->tag += node->tag; - node->left->info += node->tag * (node->left->end-node->left->start+1); - node->right->info += node->tag * (node->right->end-node->right->start+1); - node->tag = 0; - } - } - - int querySingle(SegTreeNode* node, int id) - { - if (id < node->start || id > node->end ) - { - return INT_MAX; // write your own logic - } - if (node->start==id && node->end==id) - { - return node->info; - } - - pushdown(node); - int a = querySingle(node->left, id); - int b = querySingle(node->right, id); - if (a!=INT_MAX) return a; - else if (b!=INT_MAX) return b; - else return INT_MAX; - } - - - -public: - vector getModifiedArray(int length, vector>& updates) - { - SegTreeNode* root = new SegTreeNode(0, length-1); - init(root, 0, length-1); - - for (auto update: updates) - { - updateRange(root, update[0], update[1], update[2]); - } - - vectorrets(length); - for (int i=0; i=end && left==NULL) // bottom node condition 1; - { - status += s; - return; - } - if (a>=end || b<=start) // bottom node condition 2; - return; - int mid = start+(end-start)/2; - if (left==NULL) // no children? create them - { - left = new SegTree(start,mid,status); - right = new SegTree(mid,end,status); - } // recursion - left->setStatus(a,b,s); - right->setStatus(a,b,s); - } - }; -public: - vector getModifiedArray(int length, vector>& updates) - { - SegTree* root = new SegTree(0,length,0); - for (auto x:updates) - root->setStatus(x[0],x[1]+1,x[2]); - vectorresults(length); - DFS(root,results); - return results; - } - void DFS(SegTree* node, vector&results) - { - if (node->left!=NULL) - { - DFS(node->left,results); - DFS(node->right,results); - return; - } - for (int i=node->start; iend; i++) - results[i] = node->status; - } -}; diff --git a/Segment_Tree/370.Range-Addition/Readme.md b/Segment_Tree/370.Range-Addition/Readme.md index c8dbe7097..2ad786890 100644 --- a/Segment_Tree/370.Range-Addition/Readme.md +++ b/Segment_Tree/370.Range-Addition/Readme.md @@ -1,9 +1,9 @@ ### 370.Range-Addition -#### 解法1: +#### 解法1:差分数组 此题比较简单的解法是用差分数组```diff```。```diff[i]```表示```nums[i]```比```nums[i-1]```大多少。这样如果已知```nums[i-1]```,那么就有```diff[i]=nums[i-1]+diff[i]```。本题中的三元参数```update(i,j,k)```恰好就是给出了这样的差分数组的信息:```diff[i]+=k, diff[j+1]-=k```. 
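A minimal sketch of 解法1 (the difference array), assuming the usual LC370 interface; it is not necessarily identical to the renamed `370.Range-Addition_DiffArray.cpp`. Each update touches only two cells, and the answer is rebuilt by the running sum `nums[i] = nums[i-1] + diff[i]`.
```cpp
#include <vector>
using namespace std;

// Difference-array sketch for Range Addition: update(i, j, k) becomes
// diff[i] += k and diff[j+1] -= k; a prefix sum then recovers the array.
vector<int> getModifiedArray(int length, const vector<vector<int>>& updates) {
    vector<int> diff(length + 1, 0);
    for (const auto& u : updates) {
        diff[u[0]]     += u[2];
        diff[u[1] + 1] -= u[2];
    }
    vector<int> nums(length, 0);
    int running = 0;
    for (int i = 0; i < length; ++i) {
        running += diff[i];   // nums[i] = nums[i-1] + diff[i]
        nums[i] = running;
    }
    return nums;
}
```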
-#### 解法2: +#### 解法2:线段树 本题和307很相似,也可以用线段树来实现。最大的区别就是本题中需要实现的是区间更新。在线段树的basic版本中(LC307),我们实现的都是单点更新,用单点更新来实现区间更细,效率肯定很低。 本题实现的是线段树的进阶版本,使用lazy tag来实现区间更新的延迟推广。具体的说,我们想要将区间[a:b]增加1时,不一定需要立即下沉到每个叶子节点将其info增1。如果我们没有对[a:b]中的任何一个叶子节点做查询的话,意味着不需要任何下沉操作。我们只增加[a:b]对应的节点的info,但同时标记该节点的tag为1。如果以后某个时刻,我们需要下沉访问某个下层区间或者叶子节点,那么在下沉的过程中必然会重新经过[a:b]对应的node,此时我们顺便将tag信息读入并在访问下层区间或叶子节点时,将它们的info加上这个“延迟加载”的1就行。 diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares-v2.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares-v2.cpp deleted file mode 100644 index 373f43045..000000000 --- a/Segment_Tree/699.Falling-Squares/699.Falling-Squares-v2.cpp +++ /dev/null @@ -1,39 +0,0 @@ -class Solution { -public: - vector fallingSquares(vector>& positions) - { - mapMap; - Map[0]=0; - Map[INT_MAX]=0; - int cur=0; - vectorresults; - - for (int i=0; ifirst!=left) pos=prev(pos,1); - while (pos->first <= right) - { - Hmax = max(Hmax, pos->second); - pos = next(pos,1); - } - int rightHeight = prev(pos,1)->second; - - Map.erase(pos1,pos); - Map[left]=Hmax+len; - Map[right+1]=rightHeight; - - cur = max(cur, Hmax+len); - results.push_back(cur); - } - - return results; - } -}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares.cpp deleted file mode 100644 index 9522ee658..000000000 --- a/Segment_Tree/699.Falling-Squares/699.Falling-Squares.cpp +++ /dev/null @@ -1,44 +0,0 @@ -class Solution { -public: - vector fallingSquares(vector>& positions) - { - mapMap; - - Map[0]=0; - Map[INT_MAX]=0; - - vectorresults; - int cur=0; - - for (auto p:positions) - { - int left=p.first; - int right=p.first+p.second-1; - int h=p.second; - int maxH=0; - - auto ptri = Map.lower_bound(left); - auto ptrj = Map.upper_bound(right); - - int temp = prev(ptrj,1)->second; - - auto ptr = ptri->first==left? ptri:prev(ptri,1); - while (ptr!=ptrj) - { - maxH=max(maxH, ptr->second); - ptr = next(ptr,1); - } - if (ptri!=ptrj) - Map.erase(ptri,ptrj); - - Map[left] = maxH+h; - Map[right+1] = temp; - cur = max(cur, maxH+h); - - results.push_back(cur); - } - - return results; - - } -}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_Heap_v1.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_Heap_v1.cpp new file mode 100644 index 000000000..f1fe39f79 --- /dev/null +++ b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_Heap_v1.cpp @@ -0,0 +1,42 @@ +class Solution { +public: + vector fallingSquares(vector > &positions) { + map Map; + + Map[0] = 0; + Map[INT_MAX] = 0; + + vector results; + int cur = 0; + + for (auto p: positions) { + int left = p[0]; + int right = p[0] + p[1] - 1; + int h = p[1]; + int maxH = 0; + + auto ptri = Map.lower_bound(left); + auto ptrj = Map.upper_bound(right); + + int temp = prev(ptrj, 1)->second; + + auto ptr = ptri->first == left ? 
ptri : prev(ptri, 1); + while (ptr != ptrj) { + maxH = max(maxH, ptr->second); + ptr = next(ptr, 1); + } + if (ptri != ptrj) + Map.erase(ptri, ptrj); + + Map[left] = maxH + h; + if (right + 1 < ptrj->first) + Map[right + 1] = temp; + cur = max(cur, maxH + h); + + results.push_back(cur); + } + + return results; + + } +}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_Heap_v2.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_Heap_v2.cpp new file mode 100644 index 000000000..b63837efd --- /dev/null +++ b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_Heap_v2.cpp @@ -0,0 +1,37 @@ +class Solution { +public: + vector fallingSquares(vector > &positions) { + map Map; + Map[0] = 0; + Map[INT_MAX] = 0; + int cur = 0; + vector results; + + for (int i = 0; i < positions.size(); i++) { + int left = positions[i][0]; + int len = positions[i][1]; + int right = left + len - 1; + + auto pos1 = Map.lower_bound(left); + + int Hmax = 0; + auto pos = pos1; + if (pos->first != left) pos = prev(pos, 1); + while (pos->first <= right) { + Hmax = max(Hmax, pos->second); + pos = next(pos, 1); + } + int rightHeight = prev(pos, 1)->second; + + Map.erase(pos1, pos); + Map[left] = Hmax + len; + if (right + 1 < pos->first) + Map[right + 1] = rightHeight; + + cur = max(cur, Hmax + len); + results.push_back(cur); + } + + return results; + } +}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegTree_v1.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegTree_v1.cpp new file mode 100644 index 000000000..b7baf53a0 --- /dev/null +++ b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegTree_v1.cpp @@ -0,0 +1,74 @@ +// 线段树可动态开点 + +class SegTree +{ + public: + int start,end,status; + SegTree* left; + SegTree* right; + SegTree(int a,int b,int s):start(a),end(b),status(s),left(NULL),right(NULL){} + + void remove(SegTree* &node) + { + if (node==NULL) return; + remove(node->left); + remove(node->right); + delete node; + node=NULL; + return; + } + + void setStatus(int a, int b, int s) + { + if (a<=start && b>=end) + { + remove(left); + remove(right); + status = s; + return; + } + if (a>=end || b<=start) + return; + if (left==NULL) + { + int mid = (end-start)/2+start; + left = new SegTree(start,mid,status); + right = new SegTree(mid,end,status); + } + left->setStatus(a,b,s); + right->setStatus(a,b,s); + status = max(left->status,right->status); + return; + } + + int getStatus(int a, int b) + { + if (a<=start && b>=end) + return status; + if (a>=end || b<=start) + return 0; + if (left==NULL) + return status; + int L = left->getStatus(a,b); + int R = right->getStatus(a,b); + return max(L,R); + } +}; + +class Solution { +public: + vector fallingSquares(vector>& positions) + { + SegTree* root = new SegTree(0,1e9,0); + vectorresult; + int curMax = 0; + for (auto p:positions) + { + int cur = root->getStatus(p[0], p[0]+p[1]); + curMax = max(curMax, cur+p[1]); + root->setStatus(p[0], p[0]+p[1], cur+p[1]); + result.push_back(curMax); + } + return result; + } +}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegTree_v2.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegTree_v2.cpp new file mode 100644 index 000000000..9b43d96b8 --- /dev/null +++ b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegTree_v2.cpp @@ -0,0 +1,120 @@ +// 线段树大小在初始化时固定。支持Lazy Tag(延迟标记) + +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int 
a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +class Solution { +public: + vector fallingSquares(vector>& positions) + { + setSet; + for (auto & rect: positions) + { + Set.insert(rect[0]); + Set.insert(rect[0]+rect[1]); + } + unordered_mappos2idx; + int idx = 0; + for (auto x: Set) + { + pos2idx[x] = idx; + idx++; + } + int n = pos2idx.size(); + + SegTreeNode* root = new SegTreeNode(0, n-1, 0); + + int maxH = 0; + vectorrets; + for (auto & rect: positions) + { + int a = pos2idx[rect[0]]; + int b = pos2idx[rect[0]+rect[1]]; + int h = root->queryRange(a, b-1); // [a,b) + root->updateRange(a, b-1, h + rect[1]); + maxH = max(maxH, h + rect[1]); + rets.push_back(maxH); + } + return rets; + } +}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegmentTree_LazyTag.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegmentTree_LazyTag.cpp deleted file mode 100644 index 0e33f2a38..000000000 --- a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_SegmentTree_LazyTag.cpp +++ /dev/null @@ -1,112 +0,0 @@ -class Solution { - class SegTreeNode - { - public: - SegTreeNode* left; - SegTreeNode* right; - int start, end; - int info; // the max height of the range - bool tag; - SegTreeNode(int a, int b):start(a),end(b),info(0),tag(0),left(NULL),right(NULL){} - }; - - void init(SegTreeNode* node, int a, int b) // init for range [a,b] - { - if (a==b) - { - node->info = 0; - return; - } - int mid = (a+b)/2; - if (node->left==NULL) - { - node->left = new SegTreeNode(a, mid); - node->right = new SegTreeNode(mid+1, b); - } - init(node->left, a, mid); - init(node->right, mid+1, b); - - node->info = 0; // write your own logic - } - - void updateRange(SegTreeNode* node, int a, int b, int val) - { - if (b < node->start || a > node->end ) // no intersection - return; - if (a <= node->start && node->end <=b) - { - node->info = val; - node->tag = 1; - return; - } - - pushDown(node); - updateRange(node->left, a, b, val); - updateRange(node->right, a, b, val); - - node->info = max(node->left->info, node->right->info); // write your own logic - } - - int queryRange(SegTreeNode* node, int a, int b) - { - if (b < node->start || a > node->end ) - { - return 0; // write your own logic 
- } - if (a <= node->start && b>=node->end) - { - return node->info; // write your own logic - } - pushDown(node); - node->info = max(queryRange(node->left, a, b), queryRange(node->right, a, b)); // write your own logic - return node->info; - } - - void pushDown(SegTreeNode* node) - { - if (node->tag==true) - { - node->left->info = node->info; - node->right->info = node->info; - node->left->tag = 1; - node->right->tag = 1; - node->tag = 0; - } - } - - -public: - vector fallingSquares(vector>& positions) - { - setSet; - for (auto & rect: positions) - { - Set.insert(rect[0]); - Set.insert(rect[0]+rect[1]); - } - unordered_mappos2idx; - int idx = 0; - for (auto x: Set) - { - pos2idx[x] = idx; - idx++; - } - int n = pos2idx.size(); - - SegTreeNode* root = new SegTreeNode(0, n-1); - init(root, 0, n-1); - - int maxH = 0; - vectorrets; - for (auto & rect: positions) - { - int a = pos2idx[rect[0]]; - int b = pos2idx[rect[0]+rect[1]]; - int h = queryRange(root, a, b-1); // [a,b) - updateRange(root, a, b-1, h + rect[1]); - maxH = max(maxH, h + rect[1]); - rets.push_back(maxH); - } - return rets; - } -}; diff --git a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_segTree.cpp b/Segment_Tree/699.Falling-Squares/699.Falling-Squares_segTree.cpp deleted file mode 100644 index ad65408b7..000000000 --- a/Segment_Tree/699.Falling-Squares/699.Falling-Squares_segTree.cpp +++ /dev/null @@ -1,71 +0,0 @@ -class Solution { - class SegTree - { - public: - int start,end,status; - SegTree* left; - SegTree* right; - SegTree(int a,int b,int s):start(a),end(b),status(s),left(NULL),right(NULL){} - - void remove(SegTree* &node) - { - if (node==NULL) return; - remove(node->left); - remove(node->right); - delete node; - node=NULL; - return; - } - - void setStatus(int a, int b, int s) - { - if (a<=start && b>=end) - { - remove(left); - remove(right); - status = s; - return; - } - if (a>=end || b<=start) - return; - if (left==NULL) - { - int mid = (end-start)/2+start; - left = new SegTree(start,mid,status); - right = new SegTree(mid,end,status); - } - left->setStatus(a,b,s); - right->setStatus(a,b,s); - status = max(left->status,right->status); - return; - } - - int getStatus(int a, int b) - { - if (a<=start && b>=end) - return status; - if (a>=end || b<=start) - return 0; - if (left==NULL) - return status; - int L = left->getStatus(a,b); - int R = right->getStatus(a,b); - return max(L,R); - } - }; -public: - vector fallingSquares(vector>& positions) - { - SegTree root = SegTree(0,1e9,0); - vectorresult; - int curMax = 0; - for (auto p:positions) - { - int cur = root.getStatus(p.first,p.first+p.second); - curMax = max(curMax, cur+p.second); - root.setStatus(p.first,p.first+p.second, cur+p.second); - result.push_back(curMax); - } - return result; - } -}; diff --git a/Segment_Tree/715.Range-Module/715.Range-Module.cpp b/Segment_Tree/715.Range-Module/715.Range-Module.cpp index 182578df1..a79019b39 100644 --- a/Segment_Tree/715.Range-Module/715.Range-Module.cpp +++ b/Segment_Tree/715.Range-Module/715.Range-Module.cpp @@ -1,71 +1,57 @@ class RangeModule { mapMap; public: - RangeModule() - { - Map.clear(); + RangeModule() { } void addRange(int left, int right) { - auto pos1 = Map.lower_bound(left); int leftboundary=left; - if (pos1!=Map.begin() && prev(pos1,1)->second>=left) - leftboundary = prev(pos1,1)->first; - - auto pos2 = Map.upper_bound(right); int rightboundary = right; - if (pos2!=Map.begin()) - rightboundary = max(right, prev(pos2,1)->second); + + auto iter1 = Map.lower_bound(left); + if (iter1!=Map.begin() && 
prev(iter1)->second>=left) + { + iter1 = prev(iter1); + leftboundary = iter1->first; + } + + auto iter2 = Map.upper_bound(right); + if (iter2!=Map.begin() && prev(iter2)->second >= rightboundary) + rightboundary = prev(iter2)->second; - Map.erase(pos1,pos2); + Map.erase(iter1,iter2); Map[leftboundary]=rightboundary; - - /* - for (auto a:Map) - cout<second>=right); + auto iter = Map.upper_bound(left); + return (iter!=Map.begin() && prev(iter)->second>=right); } void removeRange(int left, int right) { - auto pos1 = Map.lower_bound(left); - bool flag1=0; - int temp1; - if (pos1!=Map.begin() && prev(pos1,1)->second > left) + auto iter1 = Map.lower_bound(left); + int start1 = 0, end1 = 0; + if (iter1!=Map.begin() && prev(iter1)->second > left) { - temp1 = prev(pos1,1)->first; - flag1=1; + iter1 = prev(iter1); + start1 = iter1->first; + end1 = left; } - - auto pos2 = Map.lower_bound(right); - int temp2; - bool flag2=0; - if (pos2!=Map.begin() && prev(pos2,1)->second > right) + + auto iter2 = Map.upper_bound(right); + int start2 = 0, end2 = 0; + if (iter2!=Map.begin() && prev(iter2)->second > right) { - temp2 = prev(pos2,1)->second; - flag2=1; + start2 = right; + end2 = prev(iter2)->second; } - - Map.erase(pos1,pos2); - if (flag1) Map[temp1]=left; - if (flag2) Map[right]=temp2; - - /* - for (auto a:Map) - cout<second >= left) //左边界部分重合 - leftBound = prev(pos1,1)->first; - -int rightBound = right; -auto pos2=Map.upper_bound(right); -if (pos2!=Map.begin() && prev(pos2,1)->first <= right) //右边界部分重合 - rightBound = max(right, prev(pos2,1)->second); - -Map.erase(pos1,pos2) // 删除一个前闭后开的迭代器区间 -Map[leftBound]=rightBound; +##### AddRange +加入一个新区间[left,right]时,我们考虑它与数轴上已有区间的关系。 +``` + A B C D +L____________ ______ __________R _____ + left_____________________right ``` +如上图所示,我们的目标是:删除已有的区间A、B、C,同时加入一个新的区间[L,R]. -删除一个interval时,要考虑这么几点: +如何定位区间A?A是左边界最后一个小于left的区间。所以我们用```iter1=prev(Map.lower_bound(left))```来定位这个区间,同时它可以拓展左边界,即```L = iter1->first```. 注意如果prev操作会越界的话,那么iter1就定位在Map.lower_bound(left),这是我们想要删除的最左边的区间。 -1. 要删除的区间是否和左边的某个区间部分重合?是的话,那么左边的那个区间就要缩短,重新赋值其右边界。 -2. 要删除的区间是否和右边的某个区间部分重合?是的话,那么右边的那个区间就要缩短,重新定义其左边界。 -3. 要删除的区间范围内的任何key都是需要抹去的。 +如果定位区间C?其实更容易定位的是区间D,那么就是第一个左边界大于right的区间。所以我们用```iter2=Map.upper_bound(right)```来定位这个区间。于是从iter1到iter2(不包括iter2本身)这些区间都可以删除,于是直接用```Map.erase(iter1, iter2)```. 注意到prev(iter2)就是区间C,区间C的右边界可能帮助我们拓展右边界,即```R = max(right, prev(iter2)->second)```. -代码如下: -```cpp -auto pos1=Map.lower_bound(left); -bool flag1=0; -if (pos1!=Map.begin() && prev(pos1,1)->second >= left) -{ - flag1=1; - temp1=prev(pos1,1)->second; -} - -auto pos2=Map.lower_bound(right); -bool flag=0; -int temp2; -if (pos2!=Map.begin() && prev(pos2,1)->second > right) -{ - flag2=1; - temp2=prev(pos2,1)->second; -} - -Map.erase(pos1,pos2); -if (flag1) Map[temp1]=left; -if (flag2) Map[right]=temp2; +最终我们添加新的区间[L,R]. + +##### RemoveRange +删除一个新区间[left,right]时,我们类似地考虑它与数轴上已有区间的关系。 ``` -特别注意,对于迭代器的修改操作,得安排在删除操作之后进行。 + A B C D +L____________ ______ __________R _____ + left_____________________right +``` +如上图所示,我们的目标是:删除已有的区间A、B、C,同时加入两个新区间[L,left], [right,R]. + +如何定位区间A?同上,A是左边界最后一个小于left的区间。所以我们用```iter1=prev(Map.lower_bound(left))```来定位这个区间。如果这个区间存在并且左边界比left更早,那么我们就要添加[L,left]. + +如何定位C?同理,先定位区间D,即```iter2=Map.upper_bound(right)```,然后我们考察D的前一个区间C的右边界是否比right更晚,那么我们就要添加[right,R]. 
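For reference, the standard example from the problem statement illustrates the behavior that the addRange/removeRange bookkeeping above has to produce (ranges in LC715 are half-open `[left, right)`):
```cpp
// Usage sketch only; relies on the RangeModule class defined above.
RangeModule rm;
rm.addRange(10, 20);
rm.removeRange(14, 16);
bool a = rm.queryRange(10, 14);  // true : every point in [10,14) is tracked
bool b = rm.queryRange(13, 15);  // false: 14 is no longer tracked
bool c = rm.queryRange(16, 17);  // true : [16,17) survived the removal
```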
+ +注意,我们必须显操作```Map.erase(iter1, iter2)```,再添加这两个新区间。 + +##### QueryRange +只要用prev(Map.upper_bound(left))定位到左边界小于等于left的区间,再看区间右边界是否大于等于right即可。 -#### 解法2:使用线段树 +#### 解法2:使用线段树 (不推荐) 此题适合标准的线段树模型和数据结构。从难度上将,本题是基于307和370基础上的更进一步,因为我们需要再设计一个remove的操作。 @@ -133,4 +114,4 @@ setTree* right; ``` -[Leetcode Link](https://leetcode.com/problems/range-module) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/range-module) diff --git a/Simulation/2532.Time-to-Cross-a-Bridge/2532.Time-to-Cross-a-Bridge.cpp b/Simulation/2532.Time-to-Cross-a-Bridge/2532.Time-to-Cross-a-Bridge.cpp new file mode 100644 index 000000000..19f4b1e0e --- /dev/null +++ b/Simulation/2532.Time-to-Cross-a-Bridge/2532.Time-to-Cross-a-Bridge.cpp @@ -0,0 +1,71 @@ +using PII = pair; +class Solution { +public: + int findCrossingTime(int n, int k, vector>& time) + { + priority_queue, greater<>>leftArrive; + priority_queue, greater<>>rightArrive; + for (int i=0; ileftWait; + priority_queuerightWait; + + int ret = 0; + int crossed = 0; + int returned = 0; + + while (returned < n) + { + if (crossed == n) + { + while (!leftWait.empty()) + leftWait.pop(); + while (!leftArrive.empty()) + leftArrive.pop(); + } + + while (!leftArrive.empty() && leftArrive.top().first <= nextFree) + { + auto [arriveTime, id] = leftArrive.top(); + leftArrive.pop(); + leftWait.push({time[id][0]+time[id][2], id}); + } + while (!rightArrive.empty() && rightArrive.top().first <= nextFree) + { + auto [arriveTime, id] = rightArrive.top(); + rightArrive.pop(); + rightWait.push({time[id][0]+time[id][2], id}); + } + + if (leftWait.empty() && rightWait.empty()) + { + int t1 = leftArrive.empty() ? INT_MAX : leftArrive.top().first; + int t2 = rightArrive.empty() ? INT_MAX : rightArrive.top().first; + nextFree = min(t1, t2); + continue; + } + + if (!rightWait.empty()) // R -> L + { + auto [_, id] = rightWait.top(); + rightWait.pop(); + nextFree += time[id][2]; + leftArrive.push({nextFree+time[id][3], id}); + returned++; + ret = max(ret, nextFree); + } + else if (!leftWait.empty() && crossed < n) // L -> R + { + auto [_, id] = leftWait.top(); + leftWait.pop(); + nextFree += time[id][0]; + rightArrive.push({nextFree+time[id][1], id}); + crossed++; + } + } + + return nextFree; + } +}; diff --git a/Simulation/2532.Time-to-Cross-a-Bridge/Readme.md b/Simulation/2532.Time-to-Cross-a-Bridge/Readme.md new file mode 100644 index 000000000..3867500d0 --- /dev/null +++ b/Simulation/2532.Time-to-Cross-a-Bridge/Readme.md @@ -0,0 +1,15 @@ +### 2532.Time-to-Cross-a-Bridge + +首先注意,这是一个模拟题,不是一个优化策略问题。所有的规则已经完备地定义了所有工人该如何运作。我们需要设计代码将这个模拟过程实现。 + +我们针对左岸设立两个优先队列。leftArrive收集所有预期到达左岸(准备渡河)的工人id和时间,按照到达时间从早到晚排序。leftWait收集截止nextFree(桥空闲)时刻已经在等待渡河的工人id,按照efficiency排序。同理我们设计rightArrive和rightWait。 + +初始状态:所有的工人都在leftArrive里,到达时刻是0. 另外nextFree就是0. 
+ +每个回合,我们根据当前的nextFree,将leftArrive里面所有到达时间早于nextFree的工人都转移到leftWait里。同理,将rightArrive里面所有到达时间早于nextFree的工人都转移到rightWait里。这意味着,在nextFree时刻,我们会在leftWait和rightWait这两个队列里按照规则选择一个人过桥。但是注意,此时可能出现一种情况,那就是leftWait和rightWait依然都为空。这是因为nextFree可能太早,即桥已经空闲的时候,左右两边还没有人到。这个时候,我们需要调整nextFree,将其延后至leftArrive和rightArrive队列中各自队首元素的较小值,然后根据更新后的nextFree重复上述过程,将一个或若干个工人加入wait队列。 + +在leftWait或rightWait有了人之后,我们就按照规则选取一个人过河。如果rightWait队列有人,我们就取排位第一的工人(记做id)去渡河,那么nextFree就会延后time[id][2](即过桥的过程),同时leftArrive就会新增一个预期的到达,即id会在时刻`nextFree+time[id][2]+time[id][3]`重回左岸。如果右边队列没人,我们就取leftWait排位第一的工人(记做id)去渡河,那么nextFree就会延后time[id][0],同时rightArrive就会新增一个预期的到达,即id会在时刻`nextFree+time[id][0]+time[id][1]`重回右岸。 + +当我们发现有n个人(次)从右岸到了左岸,那么第n次右岸到左岸的nextFree就是答案。 + +此外本题有个坑。我们只需要n个人(次)从左岸到右岸,如何之后继续允许左岸到右岸的操作,有可能会延误右岸的人的回归。所以当第n次左岸到右岸操作后,我们需要将永久性地将leftArrive和leftWait清空。 diff --git a/Heap/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers.cpp b/Sorted_Container/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers.cpp similarity index 100% rename from Heap/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers.cpp rename to Sorted_Container/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers.cpp diff --git a/Heap/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/Readme.md b/Sorted_Container/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/Readme.md similarity index 100% rename from Heap/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/Readme.md rename to Sorted_Container/1296.Divide-Array-in-Sets-of-K-Consecutive-Numbers/Readme.md diff --git a/Heap/1348.Tweet-Counts-Per-Frequency/1348.Tweet-Counts-Per-Frequency.cpp b/Sorted_Container/1348.Tweet-Counts-Per-Frequency/1348.Tweet-Counts-Per-Frequency.cpp similarity index 100% rename from Heap/1348.Tweet-Counts-Per-Frequency/1348.Tweet-Counts-Per-Frequency.cpp rename to Sorted_Container/1348.Tweet-Counts-Per-Frequency/1348.Tweet-Counts-Per-Frequency.cpp diff --git a/Heap/1348.Tweet-Counts-Per-Frequency/Readme.md b/Sorted_Container/1348.Tweet-Counts-Per-Frequency/Readme.md similarity index 100% rename from Heap/1348.Tweet-Counts-Per-Frequency/Readme.md rename to Sorted_Container/1348.Tweet-Counts-Per-Frequency/Readme.md diff --git a/Greedy/1488.Avoid-Flood-in-The-City/1488.Avoid-Flood-in-The-City.cpp b/Sorted_Container/1488.Avoid-Flood-in-The-City/1488.Avoid-Flood-in-The-City.cpp similarity index 100% rename from Greedy/1488.Avoid-Flood-in-The-City/1488.Avoid-Flood-in-The-City.cpp rename to Sorted_Container/1488.Avoid-Flood-in-The-City/1488.Avoid-Flood-in-The-City.cpp diff --git a/Greedy/1488.Avoid-Flood-in-The-City/Readme.md b/Sorted_Container/1488.Avoid-Flood-in-The-City/Readme.md similarity index 100% rename from Greedy/1488.Avoid-Flood-in-The-City/Readme.md rename to Sorted_Container/1488.Avoid-Flood-in-The-City/Readme.md diff --git a/Heap/1606.Find-Servers-That-Handled-Most-Number-of-Requests/1606.Find-Servers-That-Handled-Most-Number-of-Requests.cpp b/Sorted_Container/1606.Find-Servers-That-Handled-Most-Number-of-Requests/1606.Find-Servers-That-Handled-Most-Number-of-Requests.cpp similarity index 100% rename from Heap/1606.Find-Servers-That-Handled-Most-Number-of-Requests/1606.Find-Servers-That-Handled-Most-Number-of-Requests.cpp rename to Sorted_Container/1606.Find-Servers-That-Handled-Most-Number-of-Requests/1606.Find-Servers-That-Handled-Most-Number-of-Requests.cpp diff --git 
a/Heap/1606.Find-Servers-That-Handled-Most-Number-of-Requests/Readme.md b/Sorted_Container/1606.Find-Servers-That-Handled-Most-Number-of-Requests/Readme.md similarity index 100% rename from Heap/1606.Find-Servers-That-Handled-Most-Number-of-Requests/Readme.md rename to Sorted_Container/1606.Find-Servers-That-Handled-Most-Number-of-Requests/Readme.md diff --git a/Heap/1675.Minimize-Deviation-in-Array/1675.Minimize-Deviation-in-Array.cpp b/Sorted_Container/1675.Minimize-Deviation-in-Array/1675.Minimize-Deviation-in-Array.cpp similarity index 100% rename from Heap/1675.Minimize-Deviation-in-Array/1675.Minimize-Deviation-in-Array.cpp rename to Sorted_Container/1675.Minimize-Deviation-in-Array/1675.Minimize-Deviation-in-Array.cpp diff --git a/Heap/1675.Minimize-Deviation-in-Array/Readme.md b/Sorted_Container/1675.Minimize-Deviation-in-Array/Readme.md similarity index 100% rename from Heap/1675.Minimize-Deviation-in-Array/Readme.md rename to Sorted_Container/1675.Minimize-Deviation-in-Array/Readme.md diff --git a/Heap/1825.Finding-MK-Average/1825.Finding-MK-Average.cpp b/Sorted_Container/1825.Finding-MK-Average/1825.Finding-MK-Average.cpp similarity index 100% rename from Heap/1825.Finding-MK-Average/1825.Finding-MK-Average.cpp rename to Sorted_Container/1825.Finding-MK-Average/1825.Finding-MK-Average.cpp diff --git a/Heap/1825.Finding-MK-Average/Readme.md b/Sorted_Container/1825.Finding-MK-Average/Readme.md similarity index 100% rename from Heap/1825.Finding-MK-Average/Readme.md rename to Sorted_Container/1825.Finding-MK-Average/Readme.md diff --git a/Heap/1847.Closest-Room/1847.Closest-Room.cpp b/Sorted_Container/1847.Closest-Room/1847.Closest-Room.cpp similarity index 100% rename from Heap/1847.Closest-Room/1847.Closest-Room.cpp rename to Sorted_Container/1847.Closest-Room/1847.Closest-Room.cpp diff --git a/Heap/1847.Closest-Room/Readme.md b/Sorted_Container/1847.Closest-Room/Readme.md similarity index 100% rename from Heap/1847.Closest-Room/Readme.md rename to Sorted_Container/1847.Closest-Room/Readme.md diff --git a/Heap/1912.Design-Movie-Rental-System/1912.Design-Movie-Rental-System.cpp b/Sorted_Container/1912.Design-Movie-Rental-System/1912.Design-Movie-Rental-System.cpp similarity index 100% rename from Heap/1912.Design-Movie-Rental-System/1912.Design-Movie-Rental-System.cpp rename to Sorted_Container/1912.Design-Movie-Rental-System/1912.Design-Movie-Rental-System.cpp diff --git a/Heap/1912.Design-Movie-Rental-System/Readme.md b/Sorted_Container/1912.Design-Movie-Rental-System/Readme.md similarity index 100% rename from Heap/1912.Design-Movie-Rental-System/Readme.md rename to Sorted_Container/1912.Design-Movie-Rental-System/Readme.md diff --git a/Heap/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v1.cpp b/Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v1.cpp similarity index 100% rename from Heap/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v1.cpp rename to Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v1.cpp diff --git a/Heap/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v2.cpp b/Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v2.cpp similarity index 100% rename from Heap/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v2.cpp rename to 
Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v2.cpp diff --git a/Heap/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v3.cpp b/Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v3.cpp similarity index 100% rename from Heap/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v3.cpp rename to Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/2102.Sequentially-Ordinal-Rank-Tracker_v3.cpp diff --git a/Heap/2102.Sequentially-Ordinal-Rank-Tracker/Readme.md b/Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/Readme.md similarity index 100% rename from Heap/2102.Sequentially-Ordinal-Rank-Tracker/Readme.md rename to Sorted_Container/2102.Sequentially-Ordinal-Rank-Tracker/Readme.md diff --git a/Heap/220.Contains-Duplicate-III/220.Contains-Duplicate-III.cpp b/Sorted_Container/220.Contains-Duplicate-III/220.Contains-Duplicate-III.cpp similarity index 100% rename from Heap/220.Contains-Duplicate-III/220.Contains-Duplicate-III.cpp rename to Sorted_Container/220.Contains-Duplicate-III/220.Contains-Duplicate-III.cpp diff --git a/Heap/220.Contains-Duplicate-III/Readme.md b/Sorted_Container/220.Contains-Duplicate-III/Readme.md similarity index 100% rename from Heap/220.Contains-Duplicate-III/Readme.md rename to Sorted_Container/220.Contains-Duplicate-III/Readme.md diff --git a/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character/2213.Longest-Substring-of-One-Repeating-Character.cpp b/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character/2213.Longest-Substring-of-One-Repeating-Character.cpp new file mode 100644 index 000000000..50c1f566e --- /dev/null +++ b/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character/2213.Longest-Substring-of-One-Repeating-Character.cpp @@ -0,0 +1,101 @@ +class Solution { + mapMap; + multisetSet; +public: + vector longestRepeating(string s, string queryCharacters, vector& queryIndices) + { + int n = s.size(); + for (int i=0; irets; + for (int k=0; kfirst, b = iter->second; + if (a==b && a==idx) return; + + removeInterval(a); + + if (a == idx) + { + addInterval(a, a); + addInterval(a+1, b); + } + else if (b == idx) + { + addInterval(b, b); + addInterval(a, b-1); + } + else + { + addInterval(a, idx-1); + addInterval(idx, idx); + addInterval(idx+1, b); + } + } + + void mergeRight(int idx, string&s) + { + if (idx == s.size()-1) return; + if (s[idx] != s[idx+1]) return; + + auto iter = Map.lower_bound(idx+1); + int b = iter->second; + + removeInterval(idx); + removeInterval(idx+1); + addInterval(idx, b); + } + + void mergeLeft(int idx, string&s) + { + if (idx == 0) return; + if (s[idx] != s[idx-1]) return; + + auto iter = Map.lower_bound(idx); + iter = prev(iter); + int a = iter->first; + int b = Map[idx]; + + removeInterval(idx); + removeInterval(a); + addInterval(a, b); + } +}; diff --git a/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character/Readme.md b/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character/Readme.md new file mode 100644 index 000000000..5e3985f08 --- /dev/null +++ b/Sorted_Container/2213.Longest-Substring-of-One-Repeating-Character/Readme.md @@ -0,0 +1,15 @@ +### 2213.Longest-Substring-of-One-Repeating-Character + +字符串里连续的相同字符可以视为一个区间,那么整个字符串就包含了若干个“紧密”贴合的区间。我们可以用一个有序map来放置这些区间,将key设置为区间首的位置,val设置为区间尾的位置,并且map自动按照key来排列。 + 
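A small illustrative sketch of that initialization (the helper name is an assumption, not from the repo): scan the string once, record every maximal run as `Map[start] = end`, and, as the accompanying solution does with its multiset, keep each run length so the current maximum can be read off after every update.
```cpp
#include <map>
#include <set>
#include <string>
using namespace std;

// Split s into maximal runs of one repeating character; Map[start] = end,
// and lens holds every run length (the current answer is *lens.rbegin()).
void buildRuns(const string& s, map<int,int>& Map, multiset<int>& lens) {
    int n = s.size();
    for (int i = 0; i < n; ) {
        int j = i;
        while (j + 1 < n && s[j + 1] == s[i]) ++j;   // extend the run of s[i]
        Map[i] = j;
        lens.insert(j - i + 1);
        i = j + 1;
    }
}
```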
+对于任何一个字符的改变,可能会产生一个或者多个新区间,也有可能会造成一些区间的合并,似乎头绪非常繁杂。在这里,一个比较好的思路就是分三步走:当改变位于idx的字符时,先无脑地新增一个区间[idx,idx],然后再考察往右合并邻接区间(如果可能的话),再考察往右左并邻接区间(如果可能的话)。 + +当我们考虑新增区间时,需要先找出原先包含idx的区间[a,b],其中a<=idx<=b。我们用```prev(Map.upper_bound(idx))```得到的最后一个小于等于idx的位置。接下来分为四种可能: +1. 如果a==b==idx,那么不用变化 +2. 如果a==idxMap; + int ret = 0; +public: + CountIntervals() { + + } + + void add(int left, int right) + { + unordered_settemp; + + int start = left; + auto iter = Map.lower_bound(left); + if (iter!=Map.begin() && prev(iter)->second>=start) + { + iter = prev(iter); + temp.insert(iter->first); + start = min(start, iter->first); + } + + int end = right; + iter = Map.lower_bound(left); + if (iter!=Map.begin()) + end = max(end, prev(iter)->second); + while (iter!=Map.end() && iter->first<=end) + { + temp.insert(iter->first); + end = max(end, iter->second); + iter = next(iter); + } + + for (int x: temp) + { + ret -= Map[x]-x+1; + Map.erase(x); + } + ret += end-start+1; + Map[start] = end; + } + + int count() { + return ret; + } +}; + +/** + * Your CountIntervals object will be instantiated and called as such: + * CountIntervals* obj = new CountIntervals(); + * obj->add(left,right); + * int param_2 = obj->count(); + */ diff --git a/Sorted_Container/2276.Count-Integers-in-Intervals/Readme.md b/Sorted_Container/2276.Count-Integers-in-Intervals/Readme.md new file mode 100644 index 000000000..d41546d9c --- /dev/null +++ b/Sorted_Container/2276.Count-Integers-in-Intervals/Readme.md @@ -0,0 +1,41 @@ +### 2276.Count-Integers-in-Intervals + +此题的本质就是```715.Range-Module```. 我们常用map来维护互不重叠的区间,其中的key代表了区间的起点(并是有序排列),value表示区间的终点。 + +基本思想:我们维护一个计数器count表示当前有多少整数被记录。每当加入一个新区间,我们需要标记删除哪些旧区间(因为会与新区间重叠或相交),同时在count里减去这些旧区间对应的数字个数。然后加入新区间,同时在count里加上新区间的个数。注意新区间不一定就是[left,right],而是可能与旧区间merge后的大区间。 + +对于一个新区间[left,right],我们首先考虑left左边需要删除哪些旧区间。只有一种情况,就是如果left左边有一个区间与新区间重合的时候。如图 +``` + A B +_________ _____ _________ + __________________ + left right +``` +判定起来也很方便,用```iter = Map.lower_bound(left)```来定位区间B,然后prev(iter)就是区间A。如果A的右边界大于left,那么A区间就要被删除。此时,我们需要注意,之后加入的新区间因为要与A区间merge,它的起点将是```start = A->first```. + +对于一个新区间[left,right]右边需要删除的区间,则可能会有多个。如图 +``` + A B C D +_________ __ ___ _________ + __________________ + left right +``` +我们从B开始,一路向后遍历区间,直至发现D是最后一个左边界与right勾搭上的区间。于是区间B、C、D都会是需要待删除的区间。同理我们需要注意,之后加入的新区间因为要与BCD区间merge,它的终点将是```end = D->second```. 因此我们的代码长得如下: +```cpp +int end = right; +auto iter = Map.lower_bound(left); +while (iter!=Map.end() && iter->first <= end) +{ + end = max(end, iter->second); + iter = next(iter); +} +``` +但是这里有个疏漏,事实上A的右边界可以很靠后,所以初始值的end必须同样要考虑到A->second。所以要添加一行: +```cpp +int end = right; +auto iter = Map.lower_bound(left); +if (iter!=Map.begin()) + end = max(end, prev(iter)->second); +``` + +最终我们将这些标记要删除的区间都从Map里删除,并添加上新的```Map[start] = end```. 
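A short usage sketch (the example from the problem statement) shows how the counter `ret` evolves as overlapping ranges get merged; it relies on the CountIntervals class above.
```cpp
// Usage sketch for LC 2276; ranges are closed [left, right].
CountIntervals ci;
ci.add(2, 3);        // tracked: {2,3}
ci.add(7, 10);       // tracked: {2,3,7,8,9,10}
int a = ci.count();  // 6
ci.add(5, 8);        // [5,8] merges with [7,10] into [5,10]
int b = ci.count();  // 8 -> {2,3,5,6,7,8,9,10}
```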
diff --git a/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals/2382.Maximum-Segment-Sum-After-Removals.cpp b/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals/2382.Maximum-Segment-Sum-After-Removals.cpp new file mode 100644 index 000000000..a91a537b3 --- /dev/null +++ b/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals/2382.Maximum-Segment-Sum-After-Removals.cpp @@ -0,0 +1,50 @@ +using LL = long long; +class Solution { +public: + vector maximumSegmentSum(vector& nums, vector& removeQueries) + { + int n = nums.size(); + mapMap; // start->end + multisetSet; // segment sum + + vectorpresum(n); + for (int i=0; irets; + + for (int t: removeQueries) + { + auto iter = Map.upper_bound(t); + iter = prev(iter); + + int start = iter->first; + int end = iter->second; + + Map.erase(start); + LL sum = presum[end] - (start==0?0:presum[start-1]); + Set.erase(Set.lower_bound(sum)); + + if (start <= t-1) + { + Map[start] = t-1; + Set.insert(presum[t-1] - (start==0?0:presum[start-1])); + } + + if (t+1 <= end) + { + Map[t+1] = end; + Set.insert(presum[end] - (t+1==0?0:presum[t+1-1])); + } + + LL ret = Set.empty()? 0: (*Set.rbegin()); + rets.push_back(ret); + } + + return rets; + + } +}; diff --git a/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals/Readme.md b/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals/Readme.md new file mode 100644 index 000000000..749dc9310 --- /dev/null +++ b/Sorted_Container/2382.Maximum-Segment-Sum-After-Removals/Readme.md @@ -0,0 +1,5 @@ +### 2382.Maximum-Segment-Sum-After-Removals + +我们用一个有序Map来维护当前存在的segments,其中key是起点,val是终点。另外我们在用一个有序的multiset来维护当前存在的segment sum。 + +每次我们考虑一个分割点t,根据题意,它必然存在一个segment里。可以用upper_bound在Map里找到这个segment,记做[start,end]。我们只需要将这个segment从Map中移出,并将对应的segment sum从Set里移出。再将新生成的两个新区间(如果存在的话)和各自的区间和放入Map和Set里即可。每次query之后,可以从Set里直接读出当前的最大值。 diff --git a/Sorted_Container/2612.Minimum-Reverse-Operations/2612.Minimum-Reverse-Operations.cpp b/Sorted_Container/2612.Minimum-Reverse-Operations/2612.Minimum-Reverse-Operations.cpp new file mode 100644 index 000000000..d4f90937b --- /dev/null +++ b/Sorted_Container/2612.Minimum-Reverse-Operations/2612.Minimum-Reverse-Operations.cpp @@ -0,0 +1,52 @@ +class Solution { +public: + vector minReverseOperations(int n, int p, vector& banned, int k) + { + setodd; + seteven; + setbanned_set(banned.begin(), banned.end()); + for (int i=0; iq; + q.push(p); + vectorrets(n, -1); + rets[p] = 0; + + int step = 0; + while (!q.empty()) + { + step++; + int len = q.size(); + while (len--) + { + int i = q.front(); + q.pop(); + int L0 = max(0, i-k+1); + int j0 = (2*L0+k-1)-i; + + int L1 = min(n-k, i); + int j1 = (2*L1+k-1)-i; + + set*s; + if (j0%2==0) s = &even; + else s = &odd; + + auto iter = s->lower_bound(j0); + while (iter!=s->end() && *iter<=j1) + { + rets[*iter] = step; + q.push(*iter); + s->erase(iter++); + } + } + } + + return rets; + } +}; diff --git a/Sorted_Container/2612.Minimum-Reverse-Operations/Readme.md b/Sorted_Container/2612.Minimum-Reverse-Operations/Readme.md new file mode 100644 index 000000000..9a8588740 --- /dev/null +++ b/Sorted_Container/2612.Minimum-Reverse-Operations/Readme.md @@ -0,0 +1,9 @@ +### 2612.Minimum-Reverse-Operations + +此题类似于jump game,从起点开始,根据滑窗的不同位置,可以将1移动到多个不同的地方。然后下一轮,再根据滑窗的不同位置,可以将1继续移动到不同的地方。依次类推,可以用BFS求出1到达各个位置所用的最短步数(也就是用了几轮BFS)。 + +我们假设1的初始位置是i,滑窗的左右边界是L和R(且`R-L+1=k`),那么1就可以通过翻转从i到新位置`j = L+R-i = 2*L-i-1`,这是一个仅关于L的函数。考虑滑窗长度固定,且必须包含位置i,所以L的最左边可以到达`i-k+1`,最右边可以到达`i`。此外,L不能越界,即必须在[0,n-1]内,所以L的左边界其实是`L0=max(0,i-k+1)`,右边界其实是`min(i,n-1)`. 
于是对应的j的移动范围就是`2*L0-i-1`到`2*L1-i-1`之间,并且随着L从小到大移动,j的变动始终是+2. + +我们在尝试进行BFS的时候,最大的问题就是,我们通过i进行一次revert得到的j会有很多位置(因为滑窗可以运动),其中很多j可能是之前已经遍历过的(也就是已经确定了一个更少的步数就可以到达),我们需要挨个检验的话时间复杂度就会很高。本题有巧解。对于一次revert,j的候选点的编号要么都是同奇数(要么都是偶数),并且在奇数(或者偶数)意义上是连续的!所以我们事先将所有编号是奇数的点作为一个集合odd,将所有编号是偶数的点作为一个集合even,那么这次revert相当于在odd(或者even)上删除一段区间range(删除意味着遍历过)。只要集合是有序的,那么我们就可以很快定位到range在集合里的位置,将range在集合里面的元素都删除。因为每个元素只会在集合里最多被删除一次(以后的range定位都不会涉及已经删除的元素),所以我们可以用近乎线性的时间知道每个元素是在什么时候从集合里删除的,这就是可以到达的最小步数。 + +对于banned里面的元素,只需要实现从odd和even里排除即可。 diff --git a/Sorted_Container/2653.Sliding-Subarray-Beauty/2653.Sliding-Subarray-Beauty.cpp b/Sorted_Container/2653.Sliding-Subarray-Beauty/2653.Sliding-Subarray-Beauty.cpp new file mode 100644 index 000000000..a30b6f08d --- /dev/null +++ b/Sorted_Container/2653.Sliding-Subarray-Beauty/2653.Sliding-Subarray-Beauty.cpp @@ -0,0 +1,54 @@ +class Solution { +public: + vector getSubarrayBeauty(vector& nums, int k, int x) + { + multisetSet1; + multisetSet2; + vectorrets; + + for (int i=0; i nums[i]) + { + Set1.erase(Set1.find(v)); + Set2.insert(v); + Set1.insert(nums[i]); + } + else + { + Set2.insert(nums[i]); + } + } + + if (Set1.size() + Set2.size() == k) + { + int v = *Set1.rbegin(); + rets.push_back(min(v, 0)); + } + + if (i>=k-1) + { + int v = nums[i-k+1]; + auto iter = Set2.find(v); + if (iter!=Set2.end()) + Set2.erase(iter); + else + { + Set1.erase(Set1.find(v)); + if (!Set2.empty()) + { + Set1.insert(*Set2.begin()); + Set2.erase(Set2.begin()); + } + } + } + } + + return rets; + } +}; diff --git a/Sorted_Container/2653.Sliding-Subarray-Beauty/Readme.md b/Sorted_Container/2653.Sliding-Subarray-Beauty/Readme.md new file mode 100644 index 000000000..f9ee488d0 --- /dev/null +++ b/Sorted_Container/2653.Sliding-Subarray-Beauty/Readme.md @@ -0,0 +1,8 @@ +### 2653.Sliding-Subarray-Beauty + +本题如果利用`-50 <= nums[i] <= 50`的条件,那么可以变得很容易。在这里我们只讲更一般的解法。 + +和`Dual PQ`的思路一样,设计两个有序容器,分别是装“最小的x的元素”Set1,和“剩余的元素”Set2。对于新元素nums[i],我们需要操作的步骤是: +1. 判断应该将nums[i]放入Set1还是Set2. 如果比Set1的最大元素还大,就放入Set2;否则就将Set1的最大元素转移到Set2,并将nums[i]放入Set1。 +2. 如果i>=x-1,输出Set1里的最大元素作为答案。 +3. 
将nums[i-x+1]从集合中移除。需要判断nums[i-x+1]此时在Set1里还是Set2里。如果是前者的话,需要将Set2里的元素转移一个过去, diff --git a/Sorted_Container/2736.Maximum-Sum-Queries/2736.Maximum-Sum-Queries.cpp b/Sorted_Container/2736.Maximum-Sum-Queries/2736.Maximum-Sum-Queries.cpp new file mode 100644 index 000000000..e9a3b9f92 --- /dev/null +++ b/Sorted_Container/2736.Maximum-Sum-Queries/2736.Maximum-Sum-Queries.cpp @@ -0,0 +1,42 @@ +class Solution { +public: + vector maximumSumQueries(vector& nums1, vector& nums2, vector>& queries) + { + map>>Map; + for (int i=0; irets(queries.size(), -1); + + vector>nums; + for (int i=0; ifirst <= x) + { + set>& s = iter->second; + auto iter2 = s.begin(); + while (iter2 != s.end() && iter2->first <= y) + { + rets[iter2->second] = val; + s.erase(iter2++); + } + if (s.empty()) + Map.erase(iter++); + else + iter++; + } + } + + return rets; + } +}; diff --git a/Sorted_Container/2736.Maximum-Sum-Queries/Readme.md b/Sorted_Container/2736.Maximum-Sum-Queries/Readme.md new file mode 100644 index 000000000..c4afc3b96 --- /dev/null +++ b/Sorted_Container/2736.Maximum-Sum-Queries/Readme.md @@ -0,0 +1,11 @@ +### 2736.Maximum-Sum-Queries + +如果我们将每个query独立地去做,需要暴力地扫所有的nums。一种常见的应对思路是`Off-line Querying`,将query进行某种意义上的排序,通常先解决的query会对后面的query帮助。但是这个思路似乎对本题没有帮助。比如说,将query按照x从大到小排序,随着query的逐一解答,我们可用的nums也会逐渐增多,但是并不能帮我们方便地兼顾“满足关于y的约束”以及“取最大sum”。 + +但是此题还有另外一种对偶的思路,将nums进行某种意义上的排序。我们发现,对于sum最大的num,任何满足x和y约束的所有query,必然会取该sum作为答案,既然找到了答案,那么就可以从待求的query的集合中删除。为了容易找到这些满足约束的query,我们可以将所有query先按照x排序,再按照y排序,构造二层的数据结构。这样,在第一层,任何x小于nums1[j]的query都会入选;然后在对应的第二层,任何y小于nums2[j]的query都可以被选中,标记它们的答案是sum。可以发现,这些被选中的query是分块连续的,我们可以很方便地删除。 + +同理,我们再处理sum为次大的num,删除所有答案是它的query。以此类推。 + +分析时间复杂度:我们令num的个数是m,query的个数是n。我们对于每个num,都会在query集合里删除答案对应是num的query。注意在第二层,每个query只会被访问和删除一次。所以代码核心的时间复杂度是`M+N`. 
不过预处理有一个对num和query分别排序的过程。 + +注意,为了提高效率,如果某个二层集合里的query被删空了,务必把它们的一层指针也移除。 diff --git a/Sorted_Container/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I_v1.cpp b/Sorted_Container/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I_v1.cpp new file mode 100644 index 000000000..8e57d325c --- /dev/null +++ b/Sorted_Container/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I/2907.Maximum-Profitable-Triplets-With-Increasing-Prices-I_v1.cpp @@ -0,0 +1,144 @@ +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + info = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN/2; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +class Solution { +public: + int maxProfit(vector& prices, vector& profits) + { + setSet(prices.begin(), prices.end()); + unordered_mapMap; + int m = 0; + for (int x: Set) + { + Map[x] = m; + m++; + } + + SegTreeNode* root1 = new SegTreeNode(0, m-1, -1); // Set the leaf nodes with initVals. + SegTreeNode* root2 = new SegTreeNode(0, m-1, -1); // Set the leaf nodes with initVals. 
+ + int n = prices.size(); + vectorleft(n, -1); + for (int i=0; iqueryRange(0, Map[prices[i]]-1); + if (profits[i] > root1->queryRange(Map[prices[i]], Map[prices[i]])) + root1->updateRange(Map[prices[i]], Map[prices[i]], profits[i]); // set the range [start, end] with val + } + + vectorright(n, -1); + for (int i=n-1; i>=0; i--) + { + right[i] = root2->queryRange(Map[prices[i]]+1, m-1); + if (profits[i] > root2->queryRange(Map[prices[i]], Map[prices[i]])) + root2->updateRange(Map[prices[i]], Map[prices[i]], profits[i]); // set the range [start, end] with val + } + + int ret = -1; + for (int i=0; i& prices, vector& profits) + { + int n = prices.size(); + mapMap; + + vectorleft(n, -1); + for (int i=0; isecond; + } + if (Map.find(prices[i])!=Map.end() && profits[i]<=Map[prices[i]]) + continue; + if (profits[i] <= left[i]) + continue; + Map[prices[i]] = profits[i]; + + iter = Map.upper_bound(prices[i]); + while (iter!=Map.end() && iter->second <= profits[i]) + iter = Map.erase(iter); + } + + Map.clear(); + vectorright(n, -1); + for (int i=n-1; i>=0; i--) + { + auto iter = Map.upper_bound(prices[i]); + if (iter!=Map.end()) + { + right[i] = iter->second; + } + if (Map.find(prices[i])!=Map.end() && profits[i]<=Map[prices[i]]) + continue; + if (profits[i] <= right[i]) + continue; + Map[prices[i]] = profits[i]; + + iter = Map.find(prices[i]); + map::reverse_iterator rit(iter); + // Note rit is actually at a one-position diff before iter. + vectorto_delete; + while (rit!=Map.rend() && rit->second <= profits[i]) + { + int key = rit->first; + rit = next(rit); + to_delete.push_back(key); + } + for (auto key: to_delete) Map.erase(key); + } + + int ret = -1; + for (int i=0; i& nums) + { + int n = nums.size(); + vectorarr(n); + for (int i=0; idp; + LL ret = LLONG_MIN; + + for (int i=0; isecond + nums[i]); + } + else + { + dp[x] = nums[i]; + } + + ret = max(ret, dp[x]); + + iter = dp.find(x); + iter = next(iter); + while (iter!=dp.end() && iter->second <= dp[x]) + iter = dp.erase(iter); + } + + return ret; + } +}; diff --git a/Sorted_Container/2926.Maximum-Balanced-Subsequence-Sum/Readme.md b/Sorted_Container/2926.Maximum-Balanced-Subsequence-Sum/Readme.md new file mode 100644 index 000000000..d8ad9e38f --- /dev/null +++ b/Sorted_Container/2926.Maximum-Balanced-Subsequence-Sum/Readme.md @@ -0,0 +1,31 @@ +### 2926.Maximum-Balanced-Subsequence-Sum + +很明显,变形一下式子就有```nums[i_j] - i_j >= nums[i_(j-1)] - i_(j-1)```. 我们令新数组`arr[i] = nums[i]-i`,我们就是想要在arr里面找一个递增的subsequence,记做{k},使得这个subsequence对应的 {nums[k]} 的和能够最大。 + +很容易看出可以用o(N^2)的dp来做。令dp[i]表示以i为结尾的递增subsequence的最大nums之和。那么就有 +```cpp +for (int i=0; i=B,那么对于任何一个新元素arr[i]=x,我们如果可以把x接在b后面构造子序列,那么显然不如我们把x接在a后面构成子序列更优。这样我们就可以把b从dp里弹出去。 + +所以我们将dp按照key和value都递增的顺序排列后,一个最大的好处出现了。对于任何一个新元素arr[i]=x,我们不需要在dp里遍历所有key小于x的元素,只需要知道恰好小于等于x的key(假设是y),那么就有`dp[x]=dp[y]+nums[i]`。任何key比y小的元素,虽然都可以接上x,但是它们的value并没有dp[y]有优势。 + +当我们确定dp[x]的最优值之后,再将x插入dp里面。记得此时要向后依次检查比x大的那些key,看它们的value(也就是dp值)是否小于dp[x],是的话就将他们弹出去。 + +时间复杂度:对于任何的arr[i]=x,我们在dp里面按照key二分查询恰好小于等于x的key,是log(n)。所以总的时间复杂度是o(NlogN). 
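+A minimal sketch of the per-element map update described above (assuming `dp`, `nums`, `i`, `ret` as in the solution file; details may differ slightly from that file):
+```cpp
+long long x = (long long)nums[i] - i;
+auto it = dp.upper_bound(x);                       // keys <= x end just before it
+long long best = (it == dp.begin()) ? LLONG_MIN : prev(it)->second;
+long long cur = (long long)nums[i];
+if (best != LLONG_MIN) cur = max(cur, best + nums[i]);
+ret = max(ret, cur);
+if (cur > best)                                    // insert x only if it improves the prefix maximum
+{
+    dp[x] = cur;
+    it = dp.upper_bound(x);
+    while (it != dp.end() && it->second <= cur)    // pop later keys that are now dominated
+        it = dp.erase(it);
+}
+```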
有人会问,似乎每个回合,都有线性弹出的操作,但其实总共你最多只会弹出N个元素,这个弹出操作的总是也只是o(N),与循环无关。 + + diff --git a/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet/2940.Find-Building-Where-Alice-and-Bob-Can-Meet.cpp b/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet/2940.Find-Building-Where-Alice-and-Bob-Can-Meet.cpp new file mode 100644 index 000000000..a3c30e7b2 --- /dev/null +++ b/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet/2940.Find-Building-Where-Alice-and-Bob-Can-Meet.cpp @@ -0,0 +1,47 @@ +class Solution { +public: + vector leftmostBuildingQueries(vector& heights, vector>& queries) + { + int n = heights.size(); + + for (int i=0; i&a, vector&b){ + return a[1]>b[1]; + }); + + vectorrets(queries.size()); + int i = n-1; + mapMap; + for (auto& query: queries) + { + int a = query[0], b = query[1], idx = query[2]; + while (i>=b) + { + while (!Map.empty() && heights[i] >= (Map.begin()->first)) + Map.erase(Map.begin()); + Map[heights[i]] = i; + i--; + } + + if (heights[a] < heights[b] || a==b) + { + rets[idx] = b; + continue; + } + + int m = max(heights[a],heights[b]); + auto iter = Map.upper_bound(m); + if (iter!=Map.end()) + rets[idx] = iter->second; + else + rets[idx] = -1; + } + + return rets; + } +}; diff --git a/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet/Readme.md b/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet/Readme.md new file mode 100644 index 000000000..614774896 --- /dev/null +++ b/Sorted_Container/2940.Find-Building-Where-Alice-and-Bob-Can-Meet/Readme.md @@ -0,0 +1,11 @@ +### 2940.Find-Building-Where-Alice-and-Bob-Can-Meet + +我们考虑一个query所给的两个位置a和b(其中aheights[y],那么事实上y就可以从容器里移除。因为x更靠近左边且更高,任何满足(a,b)->y的query,必然也满足(a,b)->x且x是比y更优的解(更靠近左边)。这就提示我们,如果我们将heights里的元素按照从右往左的顺序加入有序容器的话,那么就可以用上述的性质:新柱子的加入可以弹出所有比它矮的旧柱子。这就导致了这个有序容器里的柱子不仅是按照height递增的,而且他们对应的index也是递增的。也就是说,有序容器里对于任意的heights[x] i`(for i>b),同时更新容器移除陈旧的值(即那些相比于i,更靠右且更矮的柱子)。然后一个upper_bound解决该query。往容器里添加和删除元素的数据量都是线性的。 + +此外,本题需要处理两个小细节。如果heights[a]==heights[b]以及a==b的这两种情况,直接输出答案b即可。 diff --git a/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length/2945.Find-Maximum-Non-decreasing-Array-Length.cpp b/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length/2945.Find-Maximum-Non-decreasing-Array-Length.cpp new file mode 100644 index 000000000..0d65ac09a --- /dev/null +++ b/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length/2945.Find-Maximum-Non-decreasing-Array-Length.cpp @@ -0,0 +1,39 @@ +using LL = long long; +class Solution { +public: + int findMaximumLength(vector& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + + vectordp(n+1,-1); + vectorlen(n+1,-1); + vectorpresum(n+1); + for (int i=1; i<=n; i++) + presum[i] = presum[i-1] + nums[i]; + + dp[0] = 0; + len[0] = 0; + int ret = 0; + mapMap; + Map[0] = 0; + for (int i=1; i<=n; i++) + { + auto iter = Map.upper_bound(presum[i]); + if (iter!=Map.begin()) + { + int j = prev(iter)->second; + len[i] = len[j]+1; + dp[i] = presum[i] - presum[j]; + } + + while (!Map.empty() && Map.rbegin()->first >= presum[i]+dp[i]) + Map.erase(prev(Map.end())); + + Map[presum[i]+dp[i]] = i; + } + + return len[n]; + + } +}; diff --git a/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length/Readme.md b/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length/Readme.md new file mode 100644 index 000000000..18971673c --- /dev/null +++ b/Sorted_Container/2945.Find-Maximum-Non-decreasing-Array-Length/Readme.md @@ -0,0 +1,25 @@ +### 2945.Find-Maximum-Non-decreasing-Array-Length + 
+我们考虑,如果以nums[i]为某段subarray的结尾,那么我们在[1:i]前缀里能够得到的符合条件的最长序列。我们记最后这段subarray sum为dp[i]. 显然,我们需要找到一个位置j,使得dp[j]<=dp[i](其中dp[i]=sum[j+1:i])。为了使得序列尽量长,我们自然希望dp[i]能尽量小,故在所有符合条件的j里,我们一定会找最大的j。因此我们可以有这段dp代码: +```cpp +for (int i=1; i<=n; i++) +{ + LL sum = nums[i]; + int j = i-1; + while (j>=0 && sum < dp[j]) + { + sum += nums[j]; + j--; + } + dp[i] = sum; + len[i] = len[j]+1; +} +return len[n]; +``` +但是这个算法的时间复杂度是o(N^2)。 + +我们将关系式`dp[j]<=dp[i]`改写为`dp[j]<=presum[i]-presum[j]`,即`presum[i] >= presum[j]+dp[j]`. 显然,我们将所有已经得到的那些映射`presum[j]+dp[j] -> j`(因为下标小于i,故是已知量),提前放入一个有序map里,用二分搜索就可以找到对于i而言符合条件的key的范围。那么如何再找到其中最大的j呢?理论上我们需要把这些key都遍历一遍,检查他们的value。但我们会想到一个常见的套路:如果保证这个map不仅是按照key递增有序的、同时也是按照value递增有序的,那么我们就只需要一次二分搜索即可定位恰好小于等于presum[i]的key,那个key所对应的value就是我们想要的最大j,而不需要再遍历寻找value的最大值。 + +根据以上的数据结构,我们就可以轻松求出i所对应的j,以及dp[i]和len[i]。接下来我们需要将`presum[i]+dp[i] -> i`放入map里去。注意,我们依然想要保证map按照key和value都是递增有序的。事实上,我们将`presum[i]+dp[i]`作为key插入map之后,map里比其大的key所对应的value都必然小于i(因为它们是nums里位于i之前的index),这些元素都可以从map里删去。这是因为它们的key既大(不容易让后续的presum接上),value也小(index也靠前),各方面都不及`presum[i]+dp[i] -> i`优秀,今后注定不会被用到。将他们弹出之后,我们发现,map依然保持了我们想要的双递增的性质。 + +故这样的算法时间复杂度就是o(nlogn). diff --git a/Heap/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream.cpp b/Sorted_Container/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream.cpp similarity index 100% rename from Heap/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream.cpp rename to Sorted_Container/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream.cpp diff --git a/Heap/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream_v2.cpp b/Sorted_Container/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream_v2.cpp similarity index 100% rename from Heap/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream_v2.cpp rename to Sorted_Container/295.Find-Median-from-Data-Stream/295.Find-Median-from-Data-Stream_v2.cpp diff --git a/Heap/295.Find-Median-from-Data-Stream/Readme.md b/Sorted_Container/295.Find-Median-from-Data-Stream/Readme.md similarity index 100% rename from Heap/295.Find-Median-from-Data-Stream/Readme.md rename to Sorted_Container/295.Find-Median-from-Data-Stream/Readme.md diff --git a/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II.cpp b/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II.cpp new file mode 100644 index 000000000..bc7aaec45 --- /dev/null +++ b/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II.cpp @@ -0,0 +1,58 @@ +using LL = long long; +class Solution { +public: + long long minimumCost(vector& nums, int k, int dist) + { + int n = nums.size(); + + multisetSet1; + multisetSet2; + + LL sum = 0; + LL ret = LLONG_MAX; + + k--; + + for (int i=1; i=dist+1) + { + ret = min(ret, sum); + + int t = nums[i-dist]; + if (Set2.find(t)!=Set2.end()) + Set2.erase(Set2.find(t)); + else + { + Set1.erase(Set1.find(t)); + sum -= t; + if (!Set2.empty()) + { + Set1.insert(*Set2.begin()); + sum += *Set2.begin(); + Set2.erase(Set2.begin()); + } + } + } + } + + return ret + nums[0]; + + } +}; diff --git a/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II/Readme.md b/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II/Readme.md new file mode 100644 index 000000000..e647631d7 --- /dev/null +++ 
b/Sorted_Container/3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II/Readme.md @@ -0,0 +1,11 @@ +### 3013.Divide-an-Array-Into-Subarrays-With-Minimum-Cost-II + +本题的本质就是从nums[1]开始,寻找一个长度为dist+1的滑窗,使得里面top k smallest的元素和最小。 + +对于求top k smallest,有常规的套路,就是用两个multiset。将滑窗内的top k smallest放入Set1,其余元素放入Set2. + +当滑窗移动时,需要加入进入的新元素k。我们需要考察是否能进入Set1(与尾元素比较)。如果能,那么需要将Set1的尾元素取出,放入Set2. 否则,将k放入Set2。 + +同理,当滑窗移动时,我们需要移除离开滑窗的旧元素k。我们考察k是否是Set1的元素。如果是,那么我们需要将Set1的k取出,同时将Set2的首元素加入进Set1里。 + +以上操作不断更新Set1的时候(加入元素和弹出元素),同时维护一个Set1元素的和变量sum,找到全局最小值即可。 diff --git a/Heap/352.Data-Stream-as-Disjoint-Intervals/352.Data Stream as Disjoint Intervals.cpp b/Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/352.Data Stream as Disjoint Intervals.cpp similarity index 100% rename from Heap/352.Data-Stream-as-Disjoint-Intervals/352.Data Stream as Disjoint Intervals.cpp rename to Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/352.Data Stream as Disjoint Intervals.cpp diff --git a/Heap/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v2.cpp b/Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v2.cpp similarity index 100% rename from Heap/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v2.cpp rename to Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v2.cpp diff --git a/Heap/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v3.cpp b/Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v3.cpp similarity index 100% rename from Heap/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v3.cpp rename to Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/352.Data-Stream-as-Disjoint-Intervals-v3.cpp diff --git a/Heap/352.Data-Stream-as-Disjoint-Intervals/Readme.md b/Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/Readme.md similarity index 100% rename from Heap/352.Data-Stream-as-Disjoint-Intervals/Readme.md rename to Sorted_Container/352.Data-Stream-as-Disjoint-Intervals/Readme.md diff --git a/Heap/363.Max-Sum-of-Rectangle-No-Larger-Than-K/363.Max-Sum-of-Rectangle-No-Larger-Than-K.cpp b/Sorted_Container/363.Max-Sum-of-Rectangle-No-Larger-Than-K/363.Max-Sum-of-Rectangle-No-Larger-Than-K.cpp similarity index 100% rename from Heap/363.Max-Sum-of-Rectangle-No-Larger-Than-K/363.Max-Sum-of-Rectangle-No-Larger-Than-K.cpp rename to Sorted_Container/363.Max-Sum-of-Rectangle-No-Larger-Than-K/363.Max-Sum-of-Rectangle-No-Larger-Than-K.cpp diff --git a/Heap/363.Max-Sum-of-Rectangle-No-Larger-Than-K/Readme.md b/Sorted_Container/363.Max-Sum-of-Rectangle-No-Larger-Than-K/Readme.md similarity index 100% rename from Heap/363.Max-Sum-of-Rectangle-No-Larger-Than-K/Readme.md rename to Sorted_Container/363.Max-Sum-of-Rectangle-No-Larger-Than-K/Readme.md diff --git a/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays/3672.Sum-of-Weighted-Modes-in-Subarrays.cpp b/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays/3672.Sum-of-Weighted-Modes-in-Subarrays.cpp new file mode 100644 index 000000000..5d8ff6837 --- /dev/null +++ b/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays/3672.Sum-of-Weighted-Modes-in-Subarrays.cpp @@ -0,0 +1,33 @@ +class Solution { +public: + long long modeWeight(vector& nums, int k) { + map>freq2val; + unordered_mapval2freq; + long long ret = 0; + + for (int i=0; i=k-1) { + int x = *(freq2val.rbegin()->second.begin()); + ret += (long long)x*val2freq[x]; + + x = 
nums[i-k+1]; + int f = val2freq[x]; + freq2val[f].erase(x); + if (freq2val[f].empty()) + freq2val.erase(f); + val2freq[x] = f-1; + if (f!=1) + freq2val[f-1].insert(x); + } + } + return ret; + } +}; diff --git a/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays/Readme.md b/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays/Readme.md new file mode 100644 index 000000000..6b1b98d65 --- /dev/null +++ b/Sorted_Container/3672.Sum-of-Weighted-Modes-in-Subarrays/Readme.md @@ -0,0 +1,31 @@ +### 3672.Sum-of-Weighted-Modes-in-Subarrays + +比较容易想到设计两个容器: +```cpp +map>freq2val; +unordered_mapval2freq; +``` +freq2val[f]里面放所有频率为f的元素(按照从小到大排序)。这样每个sliding window,我们就可以用`*(freq2val.rbegin()->second.begin())`找到其中的众数。val2[freq]顾名思义就是每种元素的频次。 + +每次我们加入一个新元素x,可以查到它的f。于是小心地更新这两个容器即可 +```cpp +// 先删除{x,f} +freq2val[f].erase(x); +if (freq2val[f].empty()) + freq2val.erase(f); +// 再加入{x,f+1} +val2freq[x]=f+1; +freq2val[f+1].insert(x); +``` +同理,每次我们删除一个旧元素x,可以查到它的f。也是小心地更新这两个容器即可 +```cpp +// 先删除{x,f} +freq2val[f].erase(x); +if (freq2val[f].empty()) + freq2val.erase(f); +// 再加入{x,f-1} +val2freq[x] = f-1; +if (f!=1) + freq2val[f-1].insert(x); +``` +关键就是及时清理freq2val里没有值的key(即空频次),保证freq2val的结尾元素是有意义的。 diff --git a/Heap/480.Sliding-Window-Median/480.Sliding-Window-Median.cpp b/Sorted_Container/480.Sliding-Window-Median/480.Sliding-Window-Median.cpp similarity index 100% rename from Heap/480.Sliding-Window-Median/480.Sliding-Window-Median.cpp rename to Sorted_Container/480.Sliding-Window-Median/480.Sliding-Window-Median.cpp diff --git a/Heap/480.Sliding-Window-Median/Readme.md b/Sorted_Container/480.Sliding-Window-Median/Readme.md similarity index 100% rename from Heap/480.Sliding-Window-Median/Readme.md rename to Sorted_Container/480.Sliding-Window-Median/Readme.md diff --git a/Heap/632.Smallest-Range-Covering-Elements-from-K-Lists/632.Smallest-Range-Covering-Elements-from-K-Lists.cpp b/Sorted_Container/632.Smallest-Range-Covering-Elements-from-K-Lists/632.Smallest-Range-Covering-Elements-from-K-Lists.cpp similarity index 100% rename from Heap/632.Smallest-Range-Covering-Elements-from-K-Lists/632.Smallest-Range-Covering-Elements-from-K-Lists.cpp rename to Sorted_Container/632.Smallest-Range-Covering-Elements-from-K-Lists/632.Smallest-Range-Covering-Elements-from-K-Lists.cpp diff --git a/Heap/632.Smallest-Range-Covering-Elements-from-K-Lists/Readme.md b/Sorted_Container/632.Smallest-Range-Covering-Elements-from-K-Lists/Readme.md similarity index 100% rename from Heap/632.Smallest-Range-Covering-Elements-from-K-Lists/Readme.md rename to Sorted_Container/632.Smallest-Range-Covering-Elements-from-K-Lists/Readme.md diff --git a/Heap/729.My-Calendar-I/729.My-Calendar-I.cpp b/Sorted_Container/729.My-Calendar-I/729.My-Calendar-I.cpp similarity index 100% rename from Heap/729.My-Calendar-I/729.My-Calendar-I.cpp rename to Sorted_Container/729.My-Calendar-I/729.My-Calendar-I.cpp diff --git a/Heap/729.My-Calendar-I/Readme.md b/Sorted_Container/729.My-Calendar-I/Readme.md similarity index 100% rename from Heap/729.My-Calendar-I/Readme.md rename to Sorted_Container/729.My-Calendar-I/Readme.md diff --git a/Others/855.Exam-Room/855.Exam-Room.cpp b/Sorted_Container/855.Exam-Room/855.Exam-Room.cpp similarity index 100% rename from Others/855.Exam-Room/855.Exam-Room.cpp rename to Sorted_Container/855.Exam-Room/855.Exam-Room.cpp diff --git a/Sorted_Container/855.Exam-Room/Readme.md b/Sorted_Container/855.Exam-Room/Readme.md new file mode 100644 index 000000000..d9e8ea023 --- /dev/null +++ 
b/Sorted_Container/855.Exam-Room/Readme.md @@ -0,0 +1,13 @@ +### 855.Exam-Room.cpp + +本题比较偷懒的方法就是用有序容器set来盛装所有人的位置。在调用seat()的时候,就用迭代器遍历set的所有元素,查找相邻元素a和b之间的距离```diff=b-a```,那么如果要在这个区间内插入一人,显然只能插在```a+diff/2```这个地方,并且“离相邻人的最远距离”就是```diff/2```. + +需要特别注意的是如果set里的第一个元素不是0,那么我们可以选择在0位置插入。同理如果set里的最后一个元素不是n-1,那么我们可以选择在n-1位置插入。这两个地方需要单独处理。 + +最终我们选择全局“离相邻人的最远距离”的最优解来安排新人的位置。所以seat()的时间复杂度是o(n). + +对于leave(int p),则简单多了,可以直接o(1)从set里删除即可。 + +当然本题还有复杂的做法,就是将所有连续的空座区间按照“插入后离相邻人的最远距离”放入优先队列。这样我们可以log(n)的时间实现seat()。但是对于leave(p)的操作,似乎就只能靠o(n)来遍历再删除了。 + +[Leetcode Link](https://leetcode.com/problems/exam-room) diff --git a/Heap/975.Odd-Even-Jump/975.Odd-Even-Jump.cpp b/Sorted_Container/975.Odd-Even-Jump/975.Odd-Even-Jump.cpp similarity index 100% rename from Heap/975.Odd-Even-Jump/975.Odd-Even-Jump.cpp rename to Sorted_Container/975.Odd-Even-Jump/975.Odd-Even-Jump.cpp diff --git a/Heap/975.Odd-Even-Jump/Readme.md b/Sorted_Container/975.Odd-Even-Jump/Readme.md similarity index 100% rename from Heap/975.Odd-Even-Jump/Readme.md rename to Sorted_Container/975.Odd-Even-Jump/Readme.md diff --git a/Stack/032.Longest-Valid-Parentheses/Readme.md b/Stack/032.Longest-Valid-Parentheses/Readme.md index b7595b214..8946d5401 100644 --- a/Stack/032.Longest-Valid-Parentheses/Readme.md +++ b/Stack/032.Longest-Valid-Parentheses/Readme.md @@ -8,6 +8,6 @@ 由此,if possible,我们可以为每一个右括号i,寻找与之匹配的左括号j的位置(即离它左边最近的、可以匹配的左括号)。并且我们可以确定,[j:i]这对括号内的字符肯定也是已经正确匹配了的。 -但是[j:i]就一定是以j结尾的最长的合法字串了吗?不一定。此时观察,将栈顶元素j退栈“对消”之后,此时新的栈顶元素对应的位置并不一定是与j相邻的。中间这段“空隙”意味着什么呢?对,这段“空隙”是之前已经退栈了的其他合法字符串。所以我们可以在区间[j:i]的左边再加上这段长度。因此,真正的“以j结尾的最长的合法字串”的长度是```i - Stack.top()```。注意stack存放的是所有字符的index。 +但是[j:i]就一定是以i结尾的最长的合法字串了吗?不一定。此时观察,将栈顶元素j退栈“对消”之后,此时新的栈顶元素对应的位置并不一定是与j相邻的。中间这段“空隙”意味着什么呢?对,这段“空隙”是之前已经退栈了的其他合法字符串。所以我们可以在区间[j:i]的左边再加上这段长度。因此,真正的“以j结尾的最长的合法字串”的长度是```i - Stack.top()```。注意stack存放的是所有字符的index。 [Leetcode Link](https://leetcode.com/problems/longest-valid-parentheses) diff --git a/Stack/084.Largest-Rectangle-in-Histogram/084.Largest-Rectangle-in-Histogram_v1.cpp b/Stack/084.Largest-Rectangle-in-Histogram/084.Largest-Rectangle-in-Histogram_v1.cpp new file mode 100644 index 000000000..88a0ba060 --- /dev/null +++ b/Stack/084.Largest-Rectangle-in-Histogram/084.Largest-Rectangle-in-Histogram_v1.cpp @@ -0,0 +1,36 @@ +class Solution { +public: + int largestRectangleArea(vector& heights) + { + int n = heights.size(); + stackstk; + vectornextSmaller(n, n); + for (int i=0; i heights[i]) + { + nextSmaller[stk.top()] = i; + stk.pop(); + } + stk.push(i); + } + + while (!stk.empty()) stk.pop(); + vectorprevSmaller(n, -1); + for (int i=heights.size()-1; i>=0; i--) + { + while (!stk.empty() && heights[stk.top()] > heights[i]) + { + prevSmaller[stk.top()] = i; + stk.pop(); + } + stk.push(i); + } + + int ret = 0; + for (int i=0; i& heights) + { + heights.insert(heights.begin(),0); + heights.push_back(0); + stackStack; + int result = 0; + for (int i=0; i heights[i]) + { + int H = heights[Stack.top()]; + Stack.pop(); + result = max(result, H*(i-Stack.top()-1)); + } + Stack.push(i); + } + return result; + } +}; diff --git a/Stack/084.Largest-Rectangle-in-Histogram/84-Largest-Rectangle-in-Histogram.cpp b/Stack/084.Largest-Rectangle-in-Histogram/84-Largest-Rectangle-in-Histogram.cpp deleted file mode 100644 index 07e44f972..000000000 --- a/Stack/084.Largest-Rectangle-in-Histogram/84-Largest-Rectangle-in-Histogram.cpp +++ /dev/null @@ -1,43 +0,0 @@ -class Solution { -public: - /** - * @param height: A list of integer - * @return: The area of largest rectangle in the histogram - */ - 
int largestRectangleArea(vector &height) - { - if (height.size()==0) return 0; - if (height.size()==1) return height[0]; - - height.push_back(0); - height.insert(height.begin(),0); - stacks; - - int result=0; - - for (int i=0; i=height[s.top()]) - { - s.push(i); - continue; - } - - if (height[i]height[i]) - { - - int Height = height[s.top()]; - s.pop(); - result = max(result, Height*(i-s.top()-1)); - - } - s.push(i); - } - } - - return result; - } -}; diff --git a/Stack/084.Largest-Rectangle-in-Histogram/Readme.md b/Stack/084.Largest-Rectangle-in-Histogram/Readme.md index 8d901f659..fabb5ef63 100644 --- a/Stack/084.Largest-Rectangle-in-Histogram/Readme.md +++ b/Stack/084.Largest-Rectangle-in-Histogram/Readme.md @@ -1,32 +1,14 @@ ### leetcode-84-Largest-Rectangle-in-Histogram -#### 此类是贪心法的典型题。 ----------- -贪心法的原则是维护一个递增(严格的说是非递减)的栈序列s,s里面是所给数组元素的index(注意不是数组元素本身)。当下一个元素满足递增的要求时,入栈: -```c -if (height[i]>height[s.top()]) - s.push(height[i]); -``` -当下一个元素不满足递增的要求时,就退栈处理栈顶的一些元素,使得即将入列的元素依然满足递增关系。退栈处理的过程中可以方便地考察那些退栈元素所围成的最大面积。其高度易知是height[s.top()],但宽度是什么呢?注意是和次顶元素的位置有关: -```cpp -while (height[s.back()]>height[i]) -{ - Height = height[s.top()]; - s.pop(); // 提取次顶元素的位置 - result = max(result, Height * (i-s.top()-1);   -} -``` -注意如果写成以下就是错误的: -```c -result = max(result, height[s.top()] * (i-s.top()); -``` +此题是单调栈的经典应用。对于每个位置height[i],找出它的prevSmaller和nextSmaller,那么中间的区间,就可以构建一个以height[i]为高的矩形。 +比较笨的方法就是写3-pass。第一遍是用单调栈求出所有每个元素i的prevSmaller[i]。再逆序遍历一遍,求出每个元素i的nextSmaller[i]。最后一遍计算```area[i] = height[i]*(nextSmaller[i]-prevSmaller[i]-1)```. -原因是次顶元素和栈顶元素可能在index上并不是相邻的,中间可能隔着一些已经被处理掉的大数。因此在考虑当前的栈顶元素围成的面积,应该包括这些位置,所以其宽度不仅是i-s.top(),而要更大。   +高级一点的方法只需要1-pass。维护一个递增的单调栈。当新元素i比栈顶元素小时,说明栈顶元素的nextSmaller就是i,而栈顶元素的prevSmaller就是栈的次顶元素。 + +#### 其他的技巧: -其他的技巧:   ----------- 在height数组末添加元素0,是为了保证最后强制回溯。在height数组首端添加元素0,是为了便于处理s.pop()之后栈为空的特殊情况;这样处理后永远不会栈空。 -[Leetcode Link](https://leetcode.com/problems/largest-rectangle-in-histogram) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/largest-rectangle-in-histogram) diff --git a/Stack/085.Maximal-Rectangle/85-Maximal-Rectangle.cpp b/Stack/085.Maximal-Rectangle/85-Maximal-Rectangle.cpp index f31412f12..c7e9c7e14 100644 --- a/Stack/085.Maximal-Rectangle/85-Maximal-Rectangle.cpp +++ b/Stack/085.Maximal-Rectangle/85-Maximal-Rectangle.cpp @@ -1,66 +1,44 @@ class Solution { public: - /** - * @param matrix a boolean 2D matrix - * @return an integer - */ - int maximalRectangle(vector > &matrix) + int maximalRectangle(vector>& matrix) { int M=matrix.size(); - if (M==0) return 0; int N=matrix[0].size(); - auto q= vector(N,0); - int result = 0; + auto hist = vector(N,0); + int result=0; for (int i=0; iheight) - { - if (height.size()==0) return 0; - if (height.size()==1) return height[0]; - - stacks; - height.push_back(0); - height.insert(height.begin(),0); - - int result=0; - - for (int i=0; i heights) + { + heights.insert(heights.begin(),0); + heights.push_back(0); + stackStack; + int result = 0; + for (int i=0; i=height[s.top()]) + while (!Stack.empty() && heights[Stack.top()] > heights[i]) { - s.push(i); - continue; - } - - if (height[i]>& mat) + { + int m = mat.size(), n = mat[0].size(); + vectorh(n+1, 0); + + int ret = 0; + + for (int i=0; istk; + stk.push(0); + int c = 0; + for (int j=1; j<=n; j++) + { + while (!stk.empty() && h[stk.top()] > h[j]) + { + int p1 = stk.top(); + stk.pop(); + int p2 = stk.top(); + c = c - (p1-p2)*(h[p1]-h[j]); + } + c += h[j]; + ret += c; + stk.push(j); + } + } + + return ret; + } +}; diff --git 
a/Stack/1504.Count-Submatrices-With-All-Ones/Readme.md b/Stack/1504.Count-Submatrices-With-All-Ones/Readme.md new file mode 100644 index 000000000..95f8c2de8 --- /dev/null +++ b/Stack/1504.Count-Submatrices-With-All-Ones/Readme.md @@ -0,0 +1,15 @@ +### 1504.Count-Submatrices-With-All-Ones + +此题的数据量非常小,可以o(MMN)暴力解决。但是有更巧妙的o(MN)做法。 + +和85相同的技巧,我们逐行处理,更新以第i行为底座的histogram。然后逐列处理histogram里面的柱子,我们试图用单调栈来判定:以第j根柱子为右边界的矩形有多少个。 + +我们想象,如果histogram里面的柱子都是递增的。假设以第j-1根柱子为右边界的矩形有count[j-1]个,并且第j根柱子比第j-1根的高,那么将这些count[j-1]个矩形向右延伸靠到第j根柱子上的话,都会变成有效的count[j]。此外,我们只需要再计数仅包括第j根柱子本身的矩形,故`count[j] = count[j-1] + nums[j]`. + +当我们如果遇到第j根柱子矮于第j-1根柱子呢?那么并不是所有count[j-1]个矩形都可以继承并延伸成为j的一部分。我们需要退回那些“超高”的部分,即高度差为`nums[j]-nums[j-1]`的这部分矩形我们要吐出去,剩余的矩形才能继承成为j的一部分。此外,我们发现,如果nums[j-2]也高于nums[j]的话,这样的回吐过程还要继续进行下去。 + +于是这一切都提示我们用单调栈。 + +使用单调栈时特别要注意,假设栈顶元素的index是p1,次栈顶元素的index是p2,p1与p2不一定连着的。这是因为之前p1将(p2,p1)之间的元素都逼出栈了。但这并不是说中间没有柱子了,而是意味着,(p2,p1)之间存在着与p1等高的柱子。所以p1退栈的时候,需要退出的矩形数目其实是`(p1-p2)*(nums[p1]-nums[j])`. + +退栈之后记得别忘了算上nums[j](也就是仅包含第j根柱子的矩形),并将j压入栈顶。 diff --git a/Stack/224.Basic-Calculator/basic_calculator.java b/Stack/224.Basic-Calculator/basic_calculator.java new file mode 100644 index 000000000..0d3c461e3 --- /dev/null +++ b/Stack/224.Basic-Calculator/basic_calculator.java @@ -0,0 +1,43 @@ +class Solution { + public int calculate(String s) { + + StringBuilder sb = new StringBuilder(); + sb.append("+"); + for(char c: s.toCharArray()){ + if(c == ' ') continue; + sb.append(c); + if(c == '('){ + sb.append("+"); + } + } + Deque nums = new ArrayDeque<>(); + Deque signs = new ArrayDeque<>(); + int sum = 0; + int sign = 0; + + for(int i = 0; i < sb.length(); i++){ + if(sb.charAt(i) == '+' || sb.charAt(i) == '-' ){ + sign = sb.charAt(i) == '+' ? 1 : -1; + }else if(Character.isDigit(sb.charAt(i))){ + int cur = 0; + int j = i; + while(j < sb.length() && Character.isDigit(sb.charAt(j))){ + cur = cur * 10 + (sb.charAt(j) - '0'); + + j+=1; + } + i = j-1; + sum += cur * sign; + }else if(sb.charAt(i) == '('){ + nums.addFirst(sum); + signs.addFirst(sign); + sum = 0; + }else if(sb.charAt(i) == ')'){ + sum = nums.pollFirst() + signs.pollFirst() * sum; + + } + } + + return sum; + } +} diff --git a/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid.cpp b/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid.cpp new file mode 100644 index 000000000..6d38951d8 --- /dev/null +++ b/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid.cpp @@ -0,0 +1,52 @@ +class Solution { +public: + vector> seePeople(vector>& heights) + { + int m = heights.size(), n = heights[0].size(); + vector>rets(m, vector(n,0)); + + for (int i=0; istk; + for (int j=0; j= heights[i][stk.top()]) + { + rets[i][stk.top()]++; + lastRemove = heights[i][stk.top()]; + stk.pop(); + } + if (!stk.empty() && lastRemove != heights[i][j]) + { + rets[i][stk.top()]++; + } + stk.push(j); + } + + } + + for (int j=0; jstk; + for (int i=0; i= heights[stk.top()][j]) + { + rets[stk.top()][j]++; + lastRemove = heights[stk.top()][j]; + stk.pop(); + } + if (!stk.empty() && lastRemove != heights[i][j]) + { + rets[stk.top()][j]++; + } + stk.push(i); + } + } + + return rets; + + } +}; diff --git a/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid/Readme.md b/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid/Readme.md new file mode 100644 index 000000000..9121b75cd --- /dev/null +++ 
b/Stack/2282.Number-of-People-That-Can-Be-Seen-in-a-Grid/Readme.md @@ -0,0 +1,7 @@ +### 2282.Number-of-People-That-Can-Be-Seen-in-a-Grid + +此题是1944的升级版,区别在于本题允许存在相同的元素。 + +基本思路是一致的。我们从左往右维护一个严格单调递减的栈。如果有新元素nums[i]大于等于栈顶元素,意味着这个栈顶元素今后的视线都会被nums[i]遮住再也看不到其他。所以将栈顶元素的计数器加1之后(对应的是它能看到nums[i]),就可以将这个栈顶元素移除了。 + +当该退栈的元素都拿走之后,此时的栈顶元素(如果存在)必然大于nums[i],理论上需要将这个栈顶元素的计时器也加1. 但是这里有一个特例,比如```3,1,1```。第二个1会把第一个1弹出再入栈,但是注意3虽然大于第二个1,可它是看不到第二个1的。因此,如果新元素nums[i]如果从栈顶刚弹出了与自己相同的元素,那么它就不能再被此时栈顶的大元素的计数器所加1(虽然大于nums[i]). diff --git a/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold.cpp b/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold.cpp new file mode 100644 index 000000000..9d32d277b --- /dev/null +++ b/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold.cpp @@ -0,0 +1,26 @@ +class Solution { +public: + int validSubarraySize(vector& nums, int threshold) + { + nums.insert(nums.begin(), 0); + nums.push_back(0); + + int n = nums.size(); + stackstk; + for (int i=0; i threshold) + return i-stk.top()-1; + } + stk.push(i); + } + + return -1; + + } +}; diff --git a/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold/Readme.md b/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold/Readme.md new file mode 100644 index 000000000..464a33287 --- /dev/null +++ b/Stack/2334.Subarray-With-Elements-Greater-Than-Varying-Threshold/Readme.md @@ -0,0 +1,5 @@ +### 2334.Subarray-With-Elements-Greater-Than-Varying-Threshold + +这道题的套路隐藏地非常巧妙。如果我们将nums看成一个histogram,那么本质就是求一个rectange,其面积要大于threshold。 + +于是这就完全转化成了`84-Largest-Rectangle-in-Histogram`,我们只要遍历每个元素,考察它作为矩形的高时,宽的最大范围,再查看area是否大于threshold即可。 diff --git a/Stack/2355.Maximum-Number-of-Books-You-Can-Take/2355.Maximum-Number-of-Books-You-Can-Take.cpp b/Stack/2355.Maximum-Number-of-Books-You-Can-Take/2355.Maximum-Number-of-Books-You-Can-Take.cpp new file mode 100644 index 000000000..906d42e69 --- /dev/null +++ b/Stack/2355.Maximum-Number-of-Books-You-Can-Take/2355.Maximum-Number-of-Books-You-Can-Take.cpp @@ -0,0 +1,33 @@ +using LL = long long; +class Solution { +public: + long long maximumBooks(vector& books) + { + int n = books.size(); + vectordp(n); + stackstk; + + LL ret = 0; + for (int i=0; i books[i]-(i-stk.top())) + stk.pop(); + + if (!stk.empty()) + { + LL d = i - stk.top(); + dp[i] = dp[stk.top()] + ((LL)books[i] + (LL)books[i] - d + 1) * d /2; + } + else + { + LL d = min(i + 1, books[i]); + dp[i] = ((LL)books[i] + (LL)books[i] - d + 1) * d /2; + } + stk.push(i); + + ret = max(ret, dp[i]); + } + + return ret; + } +}; diff --git a/Stack/2355.Maximum-Number-of-Books-You-Can-Take/Readme.md b/Stack/2355.Maximum-Number-of-Books-You-Can-Take/Readme.md new file mode 100644 index 000000000..336508368 --- /dev/null +++ b/Stack/2355.Maximum-Number-of-Books-You-Can-Take/Readme.md @@ -0,0 +1,13 @@ +### 2355.Maximum-Number-of-Books-You-Can-Take + +我们令dp[i]表示以i为结尾的区间所能获得的最大值。显然在这个区间里,位置i处一定会取满books[i]。 + +然后在dp[i]的前提下,我们考察i-1的位置。如果`books[i-1] > books[i]-1`,根据规则意味着无法取满books[i-1],且显然,该位置的取值决定于`books[i]-1`. 类似地,在i-2处,如果`books[i-2] > books[i]-2`,那么该位置能取的最大值是由books[i]-2决定的。依次往前类推,我们会经历一段逐个减1的等差数列。直至我们发现某处`books[j] <= books[i]-(i-j)`,那么我们势必会在j处取books[j],于是自然就有`dp[i] = dp[j] + 长度为i-j且公差为1的等差数列之和`. 
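+A quick worked instance of this recurrence, with made-up numbers:
+```cpp
+// books = {5, 10, 9, 8}, i = 3: the stack pops until j = 0, where books[0] = 5 <= books[3] - 3 = 5,
+// so the run (0,3] takes 6 + 7 + 8 books.
+long long d = i - j;                                // run length, here 3
+long long area = (2LL * books[i] - d + 1) * d / 2;  // (books[i]-d+1) + ... + books[i] = 21
+dp[i] = dp[j] + area;                               // dp[3] = 5 + 21 = 26
+```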
+ +那么对于每个i,我们是否都要靠上述的方法逐一回溯之前的index来确定j的位置呢?实际上,当我们针对某个i确定了j之后,books[j+1:i-1]内的这些元素都可以舍弃不看了。因为未来的任何dp[k](其中k>i)如果依赖于dp[i],那么即`dp[k] = dp[i] + [i+1:k]的等差数列之和`,不需要再计算任何i之前的东西。相反,如果dp[k]不依赖于dp[i],那么意味着在i的位置上没有取满,自然[j+1:i-1]区间内也不会取满(因为这个区间的books值都高于以books[i]开始从后往前的等差序列)。 + +所以我们就可以设计一个单调栈。当考察新元素books[i]时,对于栈顶元素j,如果`books[j] > books[i]-(i-j)`的话,就可以不断退栈: +1. 如果剩余有栈顶元素j,那么`dp[i] = dp[j] + area`,其中area是长度为`L = i-j`,最后一个元素是books[i],公差为1的等差数列之和。于是根据等差数列的求和公式`area = (books[i]-L+1 + books[i]) * L /2`. +2. 如果剩余没有栈顶元素,那么直接有`dp[i] = area`,其中area(可能)是长度为i+1,最后一个元素是books[i],公差为1的等差数列之和。特别注意,等差数列的第一个元素不能小于1。所以,这个等差数列的实际长度应该是`L = min(i+1, heights[i])`. 于是根据等差数列的求和公式`area = (books[i]-L+1 + books[i]) * L /2`. + +最后返回的是全局dp[i]的最大值。 diff --git a/Stack/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String.cpp b/Stack/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String.cpp new file mode 100644 index 000000000..7499ce26a --- /dev/null +++ b/Stack/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String/2434.Using-a-Robot-to-Print-the-Lexicographically-Smallest-String.cpp @@ -0,0 +1,39 @@ +class Solution { +public: + string robotWithString(string s) + { + int n = s.size(); + vectornextSmallest(n,INT_MAX); + char smallest = 'z'+1; + for (int i=n-1; i>=0; i--) + { + smallest = min(smallest, s[i]); + nextSmallest[i] = smallest; + } + + stackst; + string ret; + int i = 0; + while (i secondGreaterElement(vector& nums) + { + stackst1; + stackst2; + + vectorrets(nums.size(), -1); + + for (int i=0; itemp; + while (!st1.empty() && nums[st1.top()] < nums[i]) + { + temp.push_back(st1.top()); + st1.pop(); + } + + reverse(temp.begin(), temp.end()); + for (auto x: temp) + st2.push(x); + + st1.push(i); + } + + return rets; + } +}; diff --git a/Stack/2454.Next-Greater-Element-IV/Readme.md b/Stack/2454.Next-Greater-Element-IV/Readme.md new file mode 100644 index 000000000..4db7f0e98 --- /dev/null +++ b/Stack/2454.Next-Greater-Element-IV/Readme.md @@ -0,0 +1,9 @@ +### 2454.Next-Greater-Element-IV + +我们已经知道,常规的Next Greater Element可以用单调栈实现o(n)的解法。我们维护一个单调递减的栈,如果遇到新元素大于栈顶元素,就意味着栈顶元素遇到了next greater element,于是就可以退栈。 + +在此题里,栈顶元素遇到了next greater selement,并不意味着它就可以一劳永逸地舍弃。我们需要的是the second greater element,于是我们应该对这些元素进行标记,表示他们已经看到了一次next greater。当它们再次遇到greater element的时候,才能记录答案。 + +那么如何标记呢?如果把常规的单调栈记做stk1,那么我们可以把遇到过next greater的元素拿出来,放入另外一个单调栈里,记做stk2。每次新来一个元素nums[i],先看stk2的栈顶元素是否小于num[i],是的话就意味着这些栈顶元素遇到了the second greater element,就可以记录答案并退栈了。接下来看stk1的栈顶元素是否小于nums[i],同理,是的话就意味着这些栈顶元素遇到过了next greater element,并将其移至stk2中。 + +这里要注意一定,将stk1的元素移至stk2的过程中,是否会干扰stk2的单调顺序?是不会的。stk2经过退栈之后,栈顶元素一定是大于nums[i]的;而从stk1转移至stk2的这些元素都是小于nums[i]的,所以我们可以放心将转移的元素都堆在stk1的栈顶,依然能保持stk2的递减性质。 diff --git a/Stack/2751.Robot-Collisions/2751.Robot-Collisions.cpp b/Stack/2751.Robot-Collisions/2751.Robot-Collisions.cpp new file mode 100644 index 000000000..dcdfbe400 --- /dev/null +++ b/Stack/2751.Robot-Collisions/2751.Robot-Collisions.cpp @@ -0,0 +1,54 @@ +class Solution { +public: + vector survivedRobotsHealths(vector& positions, vector& healths, string directions) + { + int n = positions.size(); + vector>robots; + for (int i=0; i>Stack; + for (int i=0; i 0) + Stack.push_back(robots[i]); + } + } + + sort(Stack.begin(), Stack.end(), [](vector&a, vector&b){return a[3]rets; + for (int i=0; i>& nodes) + { + stackstk; + stk.push(nodes[0][0]); + for (int i=1; i& nums) + { + int n = nums.size(); + vector>arr; + int 
ret = 0; + for (int i=0; isecond; + ret = max(ret, i-k+1); + } + if (arr.empty() || nums[i]>arr.back().first) + arr.push_back({nums[i], i}); + } + return ret; + } +}; diff --git a/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays/2863.Maximum-Length-of-Semi-Decreasing-Subarrays_v2.cpp b/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays/2863.Maximum-Length-of-Semi-Decreasing-Subarrays_v2.cpp new file mode 100644 index 000000000..af933ae09 --- /dev/null +++ b/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays/2863.Maximum-Length-of-Semi-Decreasing-Subarrays_v2.cpp @@ -0,0 +1,24 @@ +class Solution { +public: + int maxSubarrayLength(vector& nums) + { + int n = nums.size(); + stackstk; + for (int i=0; inums[stk.top()]) + stk.push(i); + } + + int ret = 0; + for (int i=n-1; i>=0; i--) + { + while (!stk.empty() && nums[stk.top()] > nums[i]) + { + ret = max(ret, i-stk.top()+1); + stk.pop(); + } + } + return ret; + } +}; diff --git a/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays/Readme.md b/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays/Readme.md new file mode 100644 index 000000000..63ad38d50 --- /dev/null +++ b/Stack/2863.Maximum-Length-of-Semi-Decreasing-Subarrays/Readme.md @@ -0,0 +1,21 @@ +### 2863.Maximum-Length-of-Semi-Decreasing-Subarrays + +此题和 `962.Maximum-Width-Ramp` 一模一样。 + +#### 解法1 +我们维护一个递增的“数组”arr,这是为了方便二分。对于新元素nums[i],我们用二分法在arr里找到第一个大于nums[i]的元素nums[j],于是对于i而言,它的最大跨度就是`i-j+1`. + +如果nums[i]大于数组的尾元素,就加入arr。反之,那么我们就再不用考虑i,这是因为它“又小又晚”,不会为后续的元素带来更大的跨度。 + +#### 解法2 +我们先构造一个单调递增的栈,注意我们从不退栈。方法如下: +```cpp +for (int i=0; inums[stk.top()]) + stk.push(i); +} +``` +这样的做法是我们希望尽量收录更早且更高的元素。任何更晚出现的、更小的元素,都不可能成为最优配对(i,j)中的i。 + +然后我们从后往前遍历nums[i],我们持续退栈直至找到恰好比nums[i]大的元素j。退掉的元素不可惜,因为如果j与i是一个合法配对,那么任何大于j的元素都不会与小于i的元素组成更好的配对。 diff --git a/Stack/2866.Beautiful-Towers-II/2866.Beautiful-Towers-II.cpp b/Stack/2866.Beautiful-Towers-II/2866.Beautiful-Towers-II.cpp new file mode 100644 index 000000000..299b4d379 --- /dev/null +++ b/Stack/2866.Beautiful-Towers-II/2866.Beautiful-Towers-II.cpp @@ -0,0 +1,52 @@ +using LL = long long; +class Solution { +public: + long long maximumSumOfHeights(vector& maxHeights) + { + maxHeights.insert(maxHeights.begin(), 0); + maxHeights.push_back(0); + + vectorleft = helper(maxHeights); + + reverse(maxHeights.begin(), maxHeights.end()); + vectorright = helper(maxHeights); + reverse(right.begin(), right.end()); + + reverse(maxHeights.begin(), maxHeights.end()); + + LL ret = 0; + + for (int i=0; ihelper(vectormaxHeights) + { + int n = maxHeights.size(); + stackstk; + vectorarr(n); + LL sum = 0; + stk.push(0); + arr[i] = 0; + for (int i=1; i maxHeights[i]`时,我们令 +```cpp +p1 = stk.top(); +stk.pop(); +p2 = stk.top(); +``` +我们对栈顶元素p1退栈时,要“回退”的面积其实是`(p1-p2)*maxHeights[p1]`,也就是说,之前[p2+1, p1]这一段最理想的状态是都与maxHeights[p1]平齐,这样既不超过p1的约束,也最大化了总面积。 + +同理,退完p1之后,如果发现`maxHeights[p2] > maxHeights[i]`时,我们依然要继续退栈,同上,退出一段与maxHeights[p2]平齐的高度。 + +当所有的回退完成之后,我们保证了maxHeights[i]高于当前的栈顶元素(假设为pp),那么意味着从[pp+1,i]这段区间我们都可以最大化设置为maxHeights[i]。 + +此时的总面积就是从左往右截止到i位置,为了保持递增关系,能够得到的最大面积,记做left[i]. + +同理,我们将上面的过程反过来,从右往左做一遍,得到从右往左截止到i位置,为了保持递增关系,能够得到的最大面积,记做right[i]. + +那么以i为peak的最大总面积就是`area[i] = left[i]+right[i]-maxHeights[i]`. 
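+Combining the two passes is then just the following (a sketch, with `left`, `right`, `maxHeights`, `n` as above):
+```cpp
+LL ret = 0;
+for (int i = 0; i < n; i++)
+    ret = max(ret, left[i] + right[i] - (LL)maxHeights[i]);  // the peak i is counted by both passes
+```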
+ +我们在所有的area[i]取全局最大值即可。 + +此题的解法和`084.Largest-Rectangle-in-Histogram`非常类似。 + diff --git a/Stack/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum.cpp b/Stack/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum.cpp new file mode 100644 index 000000000..413f3da6b --- /dev/null +++ b/Stack/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum/3113.Find-the-Number-of-Subarrays-Where-Boundary-Elements-Are-Maximum.cpp @@ -0,0 +1,29 @@ +using LL = long long; +class Solution { +public: + long long numberOfSubarrays(vector& nums) + { + int n = nums.size(); + vectorprevGreater(n, -1); + + stackstk; + for (int i=0; i>Map; + LL ret = 0; + for (int i=0; i& nums) { + int n = nums.size(); + vectornextGreater(n, -1); + vectorprevGreater(n, -1); + vectorst; + for (int i=0; i=0; i--) { + while (!st.empty() && nums[st.back()]=2) ret++; + if (nextGreater[i]!=-1 && nextGreater[i]-i>=2) ret++; + } + return ret; + } +}; diff --git a/Stack/3676.Count-Bowl-Subarrays/Readme.md b/Stack/3676.Count-Bowl-Subarrays/Readme.md new file mode 100644 index 000000000..066f63299 --- /dev/null +++ b/Stack/3676.Count-Bowl-Subarrays/Readme.md @@ -0,0 +1,7 @@ +### 3676.Count-Bowl-Subarrays + +非常有趣的题目。 + +我们很容易想到,对于每个nums[i]考虑其作为一个端点时,另一个端点应该在哪些位置。我们不妨认为nums[i]是左边且较低的端点,那么我们很容易找到对应的next greater element,比如说j,这是下一个可以作为右端点的位置,而这恰恰也是它所对应的唯一的右端点。如果再往右寻找右端点,那么j处在bowl内部就高于了左端点,不符合条件。 + +于是我们就可以得出结论,对于每个nums[i],只有唯一的next greater element可以配对为右边且更高的端点。同理,对于每个nums[i],只有唯一的prev greater element可以配对为左边且更高的端点。这样我们就枚举除了所有的bowl的形状。 diff --git a/Stack/503.Next-Greater-Element-II/503.Next-Greater-Element-II.cpp b/Stack/503.Next-Greater-Element-II/503.Next-Greater-Element-II.cpp index 0f807c37c..8ecafe63e 100644 --- a/Stack/503.Next-Greater-Element-II/503.Next-Greater-Element-II.cpp +++ b/Stack/503.Next-Greater-Element-II/503.Next-Greater-Element-II.cpp @@ -2,28 +2,24 @@ class Solution { public: vector nextGreaterElements(vector& nums) { - int N=nums.size(); - stack>Stack; + int N = nums.size(); + for (int i=0; iStack; vectorresults(N,-1); - for (int j=0; j=N) i=i-N; - - if (Stack.empty() || Stack.top().first>nums[i]) - Stack.push({nums[i],i}); - else + while (!Stack.empty() && nums[Stack.top()]& arr) + { + int n = arr.size(); + vectornextSmaller(n, n); + vectorprevSmaller(n, -1); + + stackStack; + for (int i=0; i arr[i]) + { + nextSmaller[Stack.top()] = i; + Stack.pop(); + } + + if (!Stack.empty()) + prevSmaller[i] = Stack.top(); + Stack.push(i); + } + + + long ret = 0; + long M = 1e9+7; + for (int i=0; i arr[i]) + { + nextSmaller[Stack.top()] = i; + Stack.pop(); + } -[Leetcode Link](https://leetcode.com/problems/sum-of-subarray-minimums) \ No newline at end of file + if (!Stack.empty()) + prevSmaller[i] = Stack.top(); + Stack.push(i); + } +``` + + +[Leetcode Link](https://leetcode.com/problems/sum-of-subarray-minimums) diff --git a/Stack/962.Maximum-Width-Ramp/Readme.md b/Stack/962.Maximum-Width-Ramp/Readme.md index badfd8ada..35c823a92 100644 --- a/Stack/962.Maximum-Width-Ramp/Readme.md +++ b/Stack/962.Maximum-Width-Ramp/Readme.md @@ -20,5 +20,7 @@ ``` 绝妙的下一步是:从后往前依次考察A,对于每个A[i],我们从栈尾依次弹出元素直至遇到一个恰好小于等于A[i]的索引j,那么(j,i)就是关乎A[i]我们能得到的最宽的配对。至于那些已经弹出栈的元素,其实丢了就丢了,并不会对答案有更多的贡献。比如说,j+1和i-1即使配对成功,也不能超越(j,i)的宽度。这样将A从后往前扫一遍,就能找到最宽的配对。 +类似的题有 `2863. 
Maximum Length of Semi-Decreasing Subarrays` -[Leetcode Link](https://leetcode.com/problems/maximum-width-ramp) \ No newline at end of file + +[Leetcode Link](https://leetcode.com/problems/maximum-width-ramp) diff --git a/String/028.Implement-strStr/028.Implement-strStr-KMP.cpp b/String/028.Implement-strStr/028.Implement-strStr-KMP.cpp index e381ee686..7de750598 100644 --- a/String/028.Implement-strStr/028.Implement-strStr-KMP.cpp +++ b/String/028.Implement-strStr/028.Implement-strStr-KMP.cpp @@ -11,16 +11,16 @@ class Solution { vector suf = preprocess(needle); vectordp(n,0); - dp[0] = (needle[0]==haystack[0]); + dp[0] = (haystack[0]==needle[0]); if (m==1 && dp[0]==1) return 0; for (int i=1; i0 && needle[j]!=haystack[i]) + while (j>0 && (j==needle.size() || haystack[i]!=needle[j])) j = suf[j-1]; - dp[i] = j + (needle[j]==haystack[i]); + dp[i] = j + (haystack[i]==needle[j]); if (dp[i]==needle.size()) return i-needle.size()+1; } diff --git a/String/2168.Unique-Substrings-With-Equal-Digit-Frequency/Readme.md b/String/2168.Unique-Substrings-With-Equal-Digit-Frequency/Readme.md index e44e063d4..0896792da 100644 --- a/String/2168.Unique-Substrings-With-Equal-Digit-Frequency/Readme.md +++ b/String/2168.Unique-Substrings-With-Equal-Digit-Frequency/Readme.md @@ -5,5 +5,5 @@ 对于判定字符串重复,我们有固定的套路,那就是rolling hash,可以将任意长度的字符串编码为一个整数来存储。本题中需要注意的是,为了避免将"012"和"12"都哈希成同一个编码,我们可以将十进制的编码规则改为十一进制,这样字符0也会被编码。即 ```cpp for (int j=i; j=0; i--) + { + if (s[i]!=s[0]) continue; + int left = 1, right = n-i; + while (left < right) + { + int mid = right - (right-left)/2; + if (getHash(s, i, mid) != hashes[mid-1]) + right = mid-1; + else + left = mid; + } + ret += left; + } + + return ret; + } +}; diff --git a/String/2223.Sum-of-Scores-of-Built-Strings/Readme.md b/String/2223.Sum-of-Scores-of-Built-Strings/Readme.md new file mode 100644 index 000000000..309586770 --- /dev/null +++ b/String/2223.Sum-of-Scores-of-Built-Strings/Readme.md @@ -0,0 +1,11 @@ +### 2223.Sum-of-Scores-of-Built-Strings + +#### 解法1:扩展KMP +本题乍看像KMP,但其实不是。KMP给出的dp[i]表示以s[i]结尾的最长后缀,使得其等于s的前缀。而本题需要求的是,以s[i]开头的最长前缀,使得其等于s的前缀。这其实叫做扩展KMP。相关的资料见[OI Wiki](https://oi-wiki.org/string/z-func/) + +#### 解法2:Rolling Hash + Binary Search +另一种更接地气的做法是Rolling Hash。对于任意的位置i,我们想查看子串s[i:n-1]与s相同的最长前缀,不妨暴力地二分尝试。即猜测一个长度len,查看s[i:i+len-1]是否与s[0,len-1]相同。但是此处显然不会无脑地逐个字符去对比,我们只需要用o(1)时间比较这两段区间的Hash值。 + +那么怎么得到任意一段区间的Hash值呢?这就类似于前缀和的思想。我们用hashes[i]表示将字符串前缀s[0:i]哈希为一个26进制数的结果。显然我们可以用o(n)提前预处理,得到所有的hashes[i]。于是对于任意一个区间[i:j],他们对应的hash值就是```hashes[j] - hashes[i-1]*26^(j-i)```. 
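+A small sketch of that bookkeeping (assuming `s` and `n` are in scope; with `pre[k]` defined as the hash of `s[0..k]`, the multiplier is 26 raised to the segment length `j-i+1`):
+```cpp
+using ULL = unsigned long long;
+vector<ULL> pre(n), pw(n + 1, 1);                   // pre[k]: hash of s[0..k]; pw[k] = 26^k
+for (int k = 0; k < n; k++)
+{
+    pre[k] = (k ? pre[k - 1] : 0) * 26 + (s[k] - 'a');
+    pw[k + 1] = pw[k] * 26;
+}
+auto getHash = [&](int i, int j) -> ULL {           // hash of s[i..j] in O(1)
+    return i == 0 ? pre[j] : pre[j] - pre[i - 1] * pw[j - i + 1];
+};
+```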
+ +Hash的过程中显然会有数值溢出的问题,通常情况下我们要设计MOD在计算的过程中不停地取余。一个取巧的方法就是用无符号长整形unsigned long long,利用自然溢出的特性来省去手工取余。用ULL的另一个好处就是区间的hash值计算里永远不会出现负数,保持了一致性。 diff --git a/String/2261.K-Divisible-Elements-Subarrays/2261.K-Divisible-Elements-Subarrays_v1.cpp b/String/2261.K-Divisible-Elements-Subarrays/2261.K-Divisible-Elements-Subarrays_v1.cpp new file mode 100644 index 000000000..40711f373 --- /dev/null +++ b/String/2261.K-Divisible-Elements-Subarrays/2261.K-Divisible-Elements-Subarrays_v1.cpp @@ -0,0 +1,36 @@ +class Solution { +public: + int countDistinct(vector& nums, int k, int p) + { + int ret = 0; + + for (int len=1; len<=nums.size(); len++) + { + set>Set; + vectorarr; + int count = 0; + + for (int i=0; i=len) + { + arr.erase(arr.begin()); + if (nums[i-len]%p==0) count--; + } + + arr.push_back(nums[i]); + if (nums[i]%p==0) count++; + + if (i>=len-1) + { + if (Set.find(arr)!=Set.end()) continue; + Set.insert(arr); + if (count<=k) + ret += 1; + } + } + } + + return ret; + } +}; diff --git a/String/2261.K-Divisible-Elements-Subarrays/2261.K-Divisible-Elements-Subarrays_v2.cpp b/String/2261.K-Divisible-Elements-Subarrays/2261.K-Divisible-Elements-Subarrays_v2.cpp new file mode 100644 index 000000000..1060f8f13 --- /dev/null +++ b/String/2261.K-Divisible-Elements-Subarrays/2261.K-Divisible-Elements-Subarrays_v2.cpp @@ -0,0 +1,43 @@ +using ULL = unsigned long long; + +class Solution { + ULL power[211]; + ULL B = 201; +public: + int countDistinct(vector& nums, int k, int p) + { + int n = nums.size(); + power[0] = 1; + for (int i=1; iSet; + ULL hash = 0; + int count = 0; + + for (int i=0; i=len) + { + hash = (hash - nums[i-len] * power[len-1] + M ) %M; + count -= (nums[i-len]%p==0); + } + + hash = hash * B + nums[i]; + count += (nums[i]%p==0); + + if (i>=len-1) + { + if (Set.find(hash)!=Set.end()) continue; + Set.insert(hash); + if (count <= k) + ret+=1; + } + } + } + return ret; + } +}; diff --git a/String/2261.K-Divisible-Elements-Subarrays/Readme.md b/String/2261.K-Divisible-Elements-Subarrays/Readme.md new file mode 100644 index 000000000..982303713 --- /dev/null +++ b/String/2261.K-Divisible-Elements-Subarrays/Readme.md @@ -0,0 +1,7 @@ +### 2261.K-Divisible-Elements-Subarrays + +#### 解法1:暴力 +用o(N^2)的算法枚举所有的subarray,然后强行将整个subarray放入一个集合之中判断是否重复。注意,这个集合操作不是o(1). 总的时间复杂度是o(N^3),刚好够。 + +#### 解法2:Rolling Hash +更高效的集合操作是存入subarray的某种编码,而不是subarray本身。这样对集合的写操作和查找操作才是真正的o(1)。考虑到每个元素不超过200,所以我们取201为base,将subarray拼接成一个201进制的数即可。同样,Rolling Hash会遇到数据类型溢出的问题。对于C++而言,采用unsigned long long的自然溢出来实现最大化的取模,是最方便的写法。参见```2223.Sum-of-Scores-of-Built-Strings```. 
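+For reference, the core of the fixed-length window roll can be sketched as below (assumed names: `power[k] = 201^k`, `seen` an `unordered_set<ULL>`, window length `len`; only unsigned overflow is relied on):
+```cpp
+ULL hash = 0;
+for (int i = 0; i < n; i++)
+{
+    if (i >= len)
+        hash -= (ULL)nums[i - len] * power[len - 1];  // drop the element leaving the window
+    hash = hash * 201 + nums[i];                      // absorb the new element in base 201
+    if (i >= len - 1)
+        seen.insert(hash);                            // window [i-len+1, i] is complete
+}
+```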
diff --git a/String/2301.Match-Substring-After-Replacement/2301.Match-Substring-After-Replacement_Brute.cpp b/String/2301.Match-Substring-After-Replacement/2301.Match-Substring-After-Replacement_Brute.cpp new file mode 100644 index 000000000..ed30a7934 --- /dev/null +++ b/String/2301.Match-Substring-After-Replacement/2301.Match-Substring-After-Replacement_Brute.cpp @@ -0,0 +1,30 @@ +class Solution { + bool table[256][256]; +public: + bool matchReplacement(string s, string sub, vector>& mappings) + { + int m = s.size(); + + for (auto x: mappings) + { + table[x[0]][x[1]] = 1; + } + + for (int i=0; i>>Map; // 't'-> {'7','8'} + bool table[128][128]; + +public: + bool equal(char x, char y) + { + return (x==y || table[y][x]); + } + + bool matchReplacement(string s, string sub, vector>& mappings) + { + for (auto x: mappings) + table[x[0]][x[1]] = 1; + + return strStr(s, sub)!= -1; + } + + int strStr(string haystack, string needle) + { + int n = haystack.size(); + int m = needle.size(); + if (m==0) return 0; + if (n==0) return -1; + + vector suf = preprocess(needle); + + vectordp(n,0); + dp[0] = equal(haystack[0], needle[0]); + if (m==1 && dp[0]==1) + return 0; + + for (int i=1; i0 && !equal(haystack[i], needle[j])) + j = suf[j-1]; + dp[i] = j + equal(haystack[i], needle[j]); + if (dp[i]==needle.size()) + return i-needle.size()+1; + } + return -1; + } + + bool equal2(char x, char y) + { + if (x==y) return true; + for (int i=0; i<128; i++) + if (table[x][i]==table[y][i]) + return true; + return false; + } + + vector preprocess(string s) + { + int n = s.size(); + vectordp(n,0); + for (int i=1; i=1 && !equal2(s[j],s[i])) + { + j = dp[j-1]; + } + dp[i] = j + equal2(s[j],s[i]); + } + return dp; + } +}; diff --git a/String/2301.Match-Substring-After-Replacement/Readme.md b/String/2301.Match-Substring-After-Replacement/Readme.md new file mode 100644 index 000000000..9a0b59f7b --- /dev/null +++ b/String/2301.Match-Substring-After-Replacement/Readme.md @@ -0,0 +1,13 @@ +### 2301.Match-Substring-After-Replacement + +#### 解法1:暴力 +本题暴力查验字符串匹配,时间是o(N^2),有AC的可能。 + +#### 解法2:KMP +本题的本质其实就是在一个字符串中查找匹配的子串。最直观的高效解法就是KMP。我们只需要略微修改KMP算法中关于“两个字符相等”的定义。 + +在KMP的主函数中,定义一个新的```equal(char a, char b)```. 当两个字符相等,或者sub的字符可以映射到s的字符中时,就返回true。 + +在KMP的preprocessing函数中,定义一个新的```equal2(char a, char b)```. 
当两个字符相等,或者这两个字符都可以映射到同一个字符时,就返回true。 + +更新:这道题不存在正确的KMP解法,敬请注意。 diff --git a/String/2468.Split-Message-Based-on-Limit/2468.Split-Message-Based-on-Limit.cpp b/String/2468.Split-Message-Based-on-Limit/2468.Split-Message-Based-on-Limit.cpp new file mode 100644 index 000000000..071db0910 --- /dev/null +++ b/String/2468.Split-Message-Based-on-Limit/2468.Split-Message-Based-on-Limit.cpp @@ -0,0 +1,40 @@ +class Solution { +public: + vector splitMessage(string message, int limit) + { + for (int len=1; 3+2*len < limit; len++) + { + int num = pow(10,len)-1; + int cost = (3+len) * num; + for (int i=1; i<=len; i++) + cost += i * ((pow(10,i)-1) - (pow(10, i-1)-1)); + + if (limit*num - cost >= (int)message.size()) + return get(message, limit, len); + } + + return {}; + } + + vectorget(string message, int limit, int len) + { + vector rets; + int count = 0; + int part = 0; + while (count < message.size()) + { + part++; + int cost = 3+len+to_string(part).size(); + int k = min((int)message.size() - count, limit-cost); + rets.push_back(message.substr(count, k) + "<" + to_string(part) + "/"); + count += k; + } + + for (string& ret: rets) + ret += to_string(part) + ">"; + + return rets; + } + + +}; diff --git a/String/2468.Split-Message-Based-on-Limit/Readme.md b/String/2468.Split-Message-Based-on-Limit/Readme.md new file mode 100644 index 000000000..1200909ed --- /dev/null +++ b/String/2468.Split-Message-Based-on-Limit/Readme.md @@ -0,0 +1,7 @@ +### 2468.Split-Message-Based-on-Limit + +此题有很大的迷惑性,二分法试探切割的份数的做法是错误的。并不是份数越多,就越容易满足要求。比如说,分割成99份,一定比分割成98份可以装下更多的字符,因为多了一个字段。但是分割成100份,不见得能比分割成99份装下更多的字符。这是因为前者每个字段的overhead是``,而后者每个字段的overhead是``,前者每个字段反而要少装1个字符,即使多一个字段,也不见得能弥补得过来(特别是当limit比较小的话)。 + +由此可见,本题的关键其实在于确定“份数”里多有少个数字。如果份数是固定的k位数,那么自然是份数越多,就越容易满足要求。所以我们先估算需要多少位数的份数。如果是一位数,那么我们就查看分成9份最多能装多少字符;如果是两位数,那么我们就查看分成99份最多能装多少字符;如果是三位数,那么我们就查看分成999份最多能装多少字符... 
当发现最多能装的字符数大于message的长度,那么我们就知道份数的digit number。 + +如果份数的digit number确定下来之后,比如说是三位数,我们就可以暴力构造答案了。虽然我们暂时不知道具体的份数,但可以用xxx代替。即`*****<1/xxx>`,`*****<2/xxx>`,其中`*****`表示可以填充的字符。不停地添加字段,直至恰好把message都填充完,此时我们就可以确定“份数”了。再将具体的“份数”替换原有的`xxx`就是答案。 diff --git a/String/2781.Length-of-the-Longest-Valid-Substring/2781.Length-of-the-Longest-Valid-Substring.cpp b/String/2781.Length-of-the-Longest-Valid-Substring/2781.Length-of-the-Longest-Valid-Substring.cpp new file mode 100644 index 000000000..97c4358c1 --- /dev/null +++ b/String/2781.Length-of-the-Longest-Valid-Substring/2781.Length-of-the-Longest-Valid-Substring.cpp @@ -0,0 +1,50 @@ +using LL = long long; +class Solution { + unordered_setSet; + unordered_map>Map; +public: + int longestValidSubstring(string word, vector& forbidden) + { + for (auto& s: forbidden) + { + LL code = 0; + for (auto ch: s) + code = (code << 5) + (ch-'a'+1); + Set.insert(code); + } + + for (int len = 1; len<=10; len++) + helper(word, len); + + int n = word.size(); + int rightBound = n; + int ret = 0; + for (int i=n-1; i>=0; i--) + { + if (Map.find(i)!=Map.end()) + { + for (int j: Map[i]) + rightBound = min(rightBound, j); + } + ret = max(ret, rightBound-i); + } + return ret; + + } + + void helper(string&word, int len) + { + int n = word.size(); + LL code = 0; + for (int i=0; i=len) + code &= (1LL<<(5*(len-1)))-1; + + code = (code << 5) + word[i]-'a'+1; + + if (i>=len-1 && Set.find(code)!=Set.end()) + Map[i-len+1].push_back(i); + } + } +}; diff --git a/String/2781.Length-of-the-Longest-Valid-Substring/Readme.md b/String/2781.Length-of-the-Longest-Valid-Substring/Readme.md new file mode 100644 index 000000000..ed33e9eb0 --- /dev/null +++ b/String/2781.Length-of-the-Longest-Valid-Substring/Readme.md @@ -0,0 +1,9 @@ +### 2781.Length-of-the-Longest-Valid-Substring + +注意到forbidden里面的字符串都很短,长度不超过10,我们可以用一个26进制的长度来编码的话,二进制的bit只需要最多50位,故一个64位长整形就能满足。这样,我们在word里走10遍不同长度的固定滑窗,每个滑窗对应一个编码,如果与forbidden里的编码相同,那么就意味着这个滑窗对应一个forbidden里的字符串。这样,我们就可以高效地知道word里所有的forbidden字串的位置。 + +接下来要做的就是在word里找到一个最长的区间,里面不包括任何完整的forbidden字串。这是一个典型的区间问题。我们考虑这样一个子问题:以i为右端点的区间,其左端点最远可以到哪里能符合条件?我们注意到,任何右边界超过i的forbidden子串都不会影响结果(他们肯定不会被完整包含)。所以我们只需要考虑所有右端点不超过i的forbidden子串,为了不完整包括它们中的任何一个,我们显然会取这些forbidden子串的所有左边界里的最大值j,这样[j+1,i]的区间就会符合条件。 + +于是我们可以归纳出算法。将所有forbidden子串按照右边界排序。这样我们从左往右扫描位置i时,就可以将顺次将所有右边界不超过i的子串加入集合,并从中更新它们左边界的最大值j。于是[j+1,i]就对应了以i为右端点、且不包括任何完整forbidden子串的最大区间。 + +同理,如果将所有forbidden子串按照左边界排序,也可以适用同样的算法,只不过从右往左扫一遍。 diff --git a/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II/3008.Find-Beautiful-Indices-in-the-Given-Array-II.cpp b/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II/3008.Find-Beautiful-Indices-in-the-Given-Array-II.cpp new file mode 100644 index 000000000..4c65f9dd0 --- /dev/null +++ b/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II/3008.Find-Beautiful-Indices-in-the-Given-Array-II.cpp @@ -0,0 +1,64 @@ +class Solution { + vector strStr(string haystack, string needle) + { + vectorrets; + + int n = haystack.size(); + int m = needle.size(); + if (m==0) return {}; + if (n==0) return {}; + + vector suf = preprocess(needle); + + vectordp(n,0); + dp[0] = (haystack[0]==needle[0]); + if (m==1 && dp[0]==1) + rets.push_back(0); + + + for (int i=1; i0 && haystack[i]!=needle[j]) + j = suf[j-1]; + dp[i] = j + (haystack[i]==needle[j]); + if (dp[i]==needle.size()) + rets.push_back(i-needle.size()+1); + } + return rets; + } + + vector preprocess(string s) + { + int n = s.size(); + vectordp(n,0); + for (int i=1; i=1 && s[j]!=s[i]) + { + j = dp[j-1]; + } + dp[i] = j + (s[j]==s[i]); + } + 
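+        // dp[i]: length of the longest proper prefix of s[0..i] that is also a suffix of s[0..i] (the KMP failure function).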
return dp; + } + +public: + vector beautifulIndices(string s, string a, string b, int k) + { + vector A = strStr(s,a); + vector B = strStr(s,b); + + vectorrets; + for (int i:A) + { + auto iter1 = lower_bound(B.begin(), B.end(), i-k); + auto iter2 = upper_bound(B.begin(), B.end(), i+k); + if (iter2-iter1>=1) + rets.push_back(i); + } + return rets; + + } +}; diff --git a/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II/Readme.md b/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II/Readme.md new file mode 100644 index 000000000..4b821348d --- /dev/null +++ b/String/3008.Find-Beautiful-Indices-in-the-Given-Array-II/Readme.md @@ -0,0 +1,5 @@ +### 3008.Find-Beautiful-Indices-in-the-Given-Array-II + +很显然,我们只需要找出a在s中出现的所有位置pos1,以及b在s中出现的所有位置pos2。这个用KMP的模板即可。 + +对于pos1中的每个位置i,我们只需要查找`i-k`在pos2里的位置(lower_bound,第一个大于等于该数的迭代器),以及`i+k`在pos2里的位置(upper_bound,第一个大于该数的迭代器),两个位置之差即代表有多少pos2的元素位于[i-k, i+k]之间。 diff --git a/String/3023.Find-Pattern-in-Infinite-Stream-I/3023.Find-Pattern-in-Infinite-Stream-I.cpp b/String/3023.Find-Pattern-in-Infinite-Stream-I/3023.Find-Pattern-in-Infinite-Stream-I.cpp new file mode 100644 index 000000000..e45b8ad7f --- /dev/null +++ b/String/3023.Find-Pattern-in-Infinite-Stream-I/3023.Find-Pattern-in-Infinite-Stream-I.cpp @@ -0,0 +1,56 @@ +/** + * Definition for an infinite stream. + * class InfiniteStream { + * public: + * InfiniteStream(vector bits); + * int next(); + * }; + */ +class Solution { +public: + vector preprocess(vector s) + { + int n = s.size(); + vectordp(n,0); + for (int i=1; i=1 && s[j]!=s[i]) + { + j = dp[j-1]; + } + dp[i] = j + (s[j]==s[i]); + } + return dp; + } + + int findPattern(InfiniteStream* stream, vector& pattern) + { + int m = pattern.size(); + if (m==0) return 0; + + vector suf = preprocess(pattern); + + unordered_mapdp; + + int num = stream->next(); + dp[0] = (num==pattern[0]); + if (m==1 && dp[0]==1) + return 0; + + int i = 1; + while (1) + { + int num = stream->next(); + + int j = dp[i-1]; + while (j>0 && (j==pattern.size() || num!=pattern[j])) + j = suf[j-1]; + dp[i] = j + (num==pattern[j]); + if (dp[i]==m) + return i-pattern.size()+1; + i++; + } + return -1; + } +}; diff --git a/String/3023.Find-Pattern-in-Infinite-Stream-I/Readme.md b/String/3023.Find-Pattern-in-Infinite-Stream-I/Readme.md new file mode 100644 index 000000000..e1736235c --- /dev/null +++ b/String/3023.Find-Pattern-in-Infinite-Stream-I/Readme.md @@ -0,0 +1,5 @@ +### 3023.Find-Pattern-in-Infinite-Stream-I + +此题是KMP的模板题。先将pattern进行预处理求得它的前缀数组`suf`。然后对stream产生的nums[i]逐位处理,利用`suf`计算nums的前缀数组dp[i]。其中dp[i]的定义是以nums[i]结尾的最长后缀,同时也是pattern的前缀。 + +当发现某处的dp[i]等于pattern的长度m时,说明`i-m+1`就是匹配的位置。 diff --git a/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II.cpp b/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II.cpp new file mode 100644 index 000000000..adcf23444 --- /dev/null +++ b/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II.cpp @@ -0,0 +1,35 @@ +class Solution { +public: + vector longestPrefix(string s) + { + int n = s.size(); + vectordp(n); + dp[0] = 0; + + for (int i=1; i=1 && s[j]!=s[i]) + { + j = dp[j-1]; + } + dp[i] = j + (s[j]==s[i]); + } + + return dp; + } + + int minimumTimeToInitialState(string word, int k) + { + int n = word.size(); + vectorlcp = longestPrefix (word); + int len = lcp[n-1]; + while (len!=0 && (n-len)%k!=0) + len = lcp[len-1]; + + if (len!=0) + return 
(n-len)/k; + else + return (n%k==0)?(n/k):(n/k+1); + } +}; diff --git a/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II_v2.cpp b/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II_v2.cpp new file mode 100644 index 000000000..30c2e84a4 --- /dev/null +++ b/String/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II/3031.Minimum-Time-to-Revert-Word-to-Initial-State-II_v2.cpp @@ -0,0 +1,12 @@ +class Solution { +public: + int minimumTimeToInitialState(string word, int k) + { + int t=1; + int n = word.size(); + char* s = &(word[0]); + while (t*k longestPrefix(string s) + { + int n = s.size(); + vectordp(n); + dp[0] = 0; + + for (int i=1; i=1 && s[j]!=s[i]) + { + j = dp[j-1]; + } + dp[i] = j + (s[j]==s[i]); + } + + return dp; + } + + void add(TrieNode* root, string& word, unordered_set&Set) + { + TrieNode* node = root; + int n = word.size(); + for (int i=0; inext[word[i]-'a']==NULL) + node->next[word[i]-'a'] = new TrieNode(); + node = node->next[word[i]-'a']; + if (Set.find(i+1)!=Set.end()) + node->count++; + } + } + + int find(TrieNode* root, string& word) + { + TrieNode* node = root; + int n = word.size(); + for (int i=0; inext[word[i]-'a']==NULL) + return 0; + node = node->next[word[i]-'a']; + } + return node->count; + } + + + long long countPrefixSuffixPairs(vector& words) + { + LL ret = 0; + for (int i=words.size()-1; i>=0; i--) + { + ret += find(root, words[i]); + + int n = words[i].size(); + vectorlcp = longestPrefix (words[i]); + unordered_setSet; + int len = lcp[n-1]; + while (len!=0) + { + Set.insert(len); + len = lcp[len-1]; + } + Set.insert(n); + + add(root, words[i], Set); + } + return ret; + } +}; diff --git a/String/3045.Count-Prefix-and-Suffix-Pairs-II/Readme.md b/String/3045.Count-Prefix-and-Suffix-Pairs-II/Readme.md new file mode 100644 index 000000000..180ff71c7 --- /dev/null +++ b/String/3045.Count-Prefix-and-Suffix-Pairs-II/Readme.md @@ -0,0 +1,7 @@ +### 3045.Count-Prefix-and-Suffix-Pairs-II + +对于一对pair里的两个字符串而言,前者是后者的一部分,所以我们必然需要先处理后者。故我们需要从后往前遍历字符串。 + +对于一个字符串s,要使得其他字符串既是它的前缀,也是它的后缀,这是一个比较苛刻的条件。我们很容易联想到KMP算法,可以用线性的时间就能找到s里的所有符合条件的子串(参考`LeetCode 3031 Minimum Time to Revert Word to Initial State II`)。对于这些子串(字符串的“前缀&后缀”),我们必然会将其放入一棵字典树里。这样,对于其他字符串t,我们可以通过搜索字典树进行匹配,从而得知t同时是多少字符串的“前缀&后缀”。 + +在字典树的数据结构里,我们会给每个节点标记count。任何需要填入字典树的子串,我们都会在其路径的最后一个节点的count增1. 
对于待查询的字符串t,如果它与字典树里的某个路径匹配,那么该路径的最后一个节点的count就代表了t能配对的字符串的数量。 diff --git a/String/3388.Count-Beautiful-Splits-in-an-Array/3388.Count-Beautiful-Splits-in-an-Array.cpp b/String/3388.Count-Beautiful-Splits-in-an-Array/3388.Count-Beautiful-Splits-in-an-Array.cpp new file mode 100644 index 000000000..87348ffea --- /dev/null +++ b/String/3388.Count-Beautiful-Splits-in-an-Array/3388.Count-Beautiful-Splits-in-an-Array.cpp @@ -0,0 +1,48 @@ +using LL = long long; + +class Solution { + const LL P = 53; + const LL M = 1e9 + 7; + + LL get_hash(const vector& prefix_hash, const vector& p_powers, int i, int j) + { + return (prefix_hash[j] - (prefix_hash[i-1] * p_powers[j-i+1]) % M + M) % M; + } + +public: + int beautifulSplits(vector& nums) + { + int n = nums.size(); + nums.insert(nums.begin(), 0); + int count = 0; + + vector prefix_hash(n+1, 0); + vector p_powers(n+1, 1); + + for (int i = 1; i <= n; i++) + { + prefix_hash[i] = (prefix_hash[i - 1] * P + nums[i]) % M; + p_powers[i] = (p_powers[i - 1] * P) % M; + } + + for (int i = 2; i < n; i++) + for (int j = i + 1; j <= n; j++) + { + LL nums1_hash = get_hash(prefix_hash, p_powers, 1, i-1); + LL nums2_hash = get_hash(prefix_hash, p_powers, i, j-1); + int len1 = i-1; + int len2 = j-i; + int len3 = n-j+1; + + int flag =0; + if ((len2>=len1) && nums1_hash == get_hash(prefix_hash, p_powers, i, i+len1-1)) + flag = 1; + + if (len3>=len2 && nums2_hash == get_hash(prefix_hash, p_powers, j, j+len2-1)) + flag = 1; + count += flag; + } + + return count; + } +}; diff --git a/String/3388.Count-Beautiful-Splits-in-an-Array/Readme.md b/String/3388.Count-Beautiful-Splits-in-an-Array/Readme.md new file mode 100644 index 000000000..5fe95028c --- /dev/null +++ b/String/3388.Count-Beautiful-Splits-in-an-Array/Readme.md @@ -0,0 +1,8 @@ +### 3388.Count-Beautiful-Splits-in-an-Array + +根据n的数量级,考虑如果暴力枚举两处分界点的话,那么需要能以o(1)的时间判定一段subarray是否是另一段subarray的前缀。此时常见的方法只有rolling hash。事实上,每个nums[i]的数值有上限50,故可以类比于字符串的rolling hash,方法应该是可行的。 + +需要注意的几个细节: +1. 每个nums[i]的数值上限是50,故可以选取质数53作为进制。 +2. 一段区间的`hash[i:j] = prefix_hash[j] - prefix_hash[i-1] * power[j-i+1]`,同时取余的过程要始终保证是正数。 +3. 判定subarray1是否subarray2的前缀时,要保证subarray2的长度不能小于subarray1. 
同理判定subarray2是否subarray3的前缀,也需要考虑这个约束。 diff --git a/String/467.Unique-Substrings-in-Wraparound-String/467.Unique-Substrings-in-Wraparound-String.cpp b/String/467.Unique-Substrings-in-Wraparound-String/467.Unique-Substrings-in-Wraparound-String.cpp index 39442862e..301212539 100644 --- a/String/467.Unique-Substrings-in-Wraparound-String/467.Unique-Substrings-in-Wraparound-String.cpp +++ b/String/467.Unique-Substrings-in-Wraparound-String/467.Unique-Substrings-in-Wraparound-String.cpp @@ -3,18 +3,20 @@ class Solution { int findSubstringInWraproundString(string p) { unordered_mapMap; + for (int i=0; iMap[p[i]]) - Map[p[i]]=j-i+1; + int i0=i; + while (i+1 wordsAbbreviation(vector& dict) - { - unordered_mapIndex; - for (int i=0; i>Map; - vectorresults(dict.size()); - int abbrNum=0; - - unordered_setSet; - for (int i=0; i1) - { - for (int i=0; i wordsAbbreviation(vector& words) + { + int n = words.size(); + vectorrets(n); + + vectorSet; + for (int i=0; i> Map; + + for (int idx: Set) + { + string abbr = getAbbr(words[idx], abbrNum); + Map[abbr].push_back(idx); + } + Set.clear(); + + for (auto& [abbr, indices]: Map) + { + if (indices.size() > 1) + { + for (int idx: indices) + Set.push_back(idx); + } + else + rets[indices[0]] = abbr; + } + + abbrNum += 1; + if (Set.size() == 0) + break; + } + + return rets; + } + + string getAbbr(string s, int abbrNum) + { + if (s.size() < 3) return s; + + string t; + t = s.substr(0, abbrNum); + t += to_string(s.size() - abbrNum - 1); + t += s.back(); + + if (t.size() == s.size()) return s; + + return t; + } +}; diff --git a/String/556.Next-Greater-Element-III/556.Next-Greater-Element-III.cpp b/String/556.Next-Greater-Element-III/556.Next-Greater-Element-III.cpp deleted file mode 100644 index 2d2ad0681..000000000 --- a/String/556.Next-Greater-Element-III/556.Next-Greater-Element-III.cpp +++ /dev/null @@ -1,43 +0,0 @@ -class Solution { -public: - int nextGreaterElement(int n) - { - if (n==0) return -1; - - vectornum; - while (n>0) - { - num.push_back(n%10); - n=n/10; - } - - vectorp; - p.push_back(num[0]); - int i=1; - while (i=num[i-1]) - { - p.push_back(num[i]); - i++; - } - if (i==num.size()) return -1; // all the digits are descending - - int j=0; - while (p[j]<=num[i]) j++; - swap(num[i],p[j]); - - sort(p.begin(),p.end()); - reverse(p.begin(),p.end()); - - for (int k=0; k=0; i--) - result = result*10+num[i]; - - if (result>INT_MAX) - return -1; - else - return result; - } -}; diff --git a/String/564.Find-the-Closest-Palindrome/564.Find-the-Closest-Palindrome.cpp b/String/564.Find-the-Closest-Palindrome/564.Find-the-Closest-Palindrome.cpp index 4d25336c1..29796c550 100644 --- a/String/564.Find-the-Closest-Palindrome/564.Find-the-Closest-Palindrome.cpp +++ b/String/564.Find-the-Closest-Palindrome/564.Find-the-Closest-Palindrome.cpp @@ -2,115 +2,79 @@ class Solution { public: string nearestPalindromic(string n) { - int N=n.size(); - string s1,s2,s3; - - if (N%2==1) - { - string t=n.substr(0,N/2+1); - long long num=convert(t); - string t1,t2; - - // candidate 1 - t1 = to_string(num); - t2 = t1; - reverse(t2.begin(),t2.end()); - s1 = t1.substr(0,N/2)+t2; - - // candidate 2 - t1 = to_string(num-1); - t2=t1; - reverse(t2.begin(),t2.end()); - s2 = t1.substr(0,N/2)+t2; - - // candidate 3 - t1 = to_string(num+1); - t2=t1; - reverse(t2.begin(),t2.end()); - s3 = t1.substr(0,N/2)+t2; + string a = makeSmaller(n); + string b = makeGreater(n); + if (stoll(b)-stoll(n) >= stoll(n)-stoll(a)) + return a; + else + return b; + } - cout<= 0; i--) { - string t=n.substr(0,N/2); - long 
long num=convert(t); - string t1,t2; - - //candidate 1 - t1 = n.substr(0,N/2); - reverse(t1.begin(),t1.end()); - s1 = to_string(num)+t1; - - //candidate 2 - t1 = to_string(num-1); - if (t1=="0") - s2="9"; - else if (t1.size()==t.size()) + int d = s[i]-'0'-carry; + if (d>=0) { - t2=t1; - reverse(t2.begin(),t2.end()); - s2=t1+t2; + s[i] = '0'+d; + carry = 0; } - else if (t1.size()!=t.size()) + else { - t2=t1; - reverse(t2.begin(),t2.end()); - s2=t1+'9'+t2; + s[i] = '9'; + carry = 1; } - - //candidate 3 - t1 = to_string(num+1); - if (t1.size()==t.size()) + s[m-1-i] = s[i]; + } + if (s[0]=='0' && m>1) + return string(m - 1, '9'); + else + return s; + } + + string makeGreater(const string &n) + { + const int m = n.length(); + string s = n; + for (int i = 0, j = m - 1; i <= j;) + s[j--] = s[i++]; + if (s > n) { + return s; + } + + int carry = 1; + for (int i = (m - 1)/2; i >= 0; i--) + { + int d = s[i]-'0'+carry; + if (d<=9) { - t2=t1; - reverse(t2.begin(),t2.end()); - s3=t1+t2; + s[i] = '0'+d; + carry = 0; } - else if (t1.size()!=t.size()) + else { - t2=t1; - reverse(t2.begin(),t2.end()); - t1.pop_back(); - s3=t1+t2; + s[i] = '0'; + carry = 1; } - - cout<bitArr; // Note: all arrays are 1-index vectornums; long long M = 1e9+7; - - BIT(int N) + + void init(int N) { this->N = N; bitArr.resize(N+1); @@ -18,7 +18,7 @@ class BIT{ while (idx <= N) { bitArr[idx]+=delta; - bitArr[idx] %= M; + // bitArr[idx] %= M; idx+=idx&(-idx); } } @@ -28,7 +28,7 @@ class BIT{ long long result = 0; while (idx){ result += bitArr[idx]; - result %= M; + // result %= M; idx-=idx&(-idx); } return result; @@ -43,12 +43,13 @@ class BIT{ int main() { int N = 100000; - BIT bit(N); + BIT bit; + bit.init(N); vectornums(N); // cin>> nums .... - + for (int i=1; i> adj[MAXN]; + int up[MAXN][LOGN+1]; + int depth[MAXN]; + ll distRoot[MAXN]; + + void dfs(int cur, int parent) + { + up[cur][0] = parent; + for(auto &[v,w]: adj[cur]) + { + if(v == parent) continue; + depth[v] = depth[cur] + 1; + distRoot[v] = distRoot[cur] + w; + dfs(v, cur); + } + } + + int lca(int a, int b) + { + if(depth[a] < depth[b]) swap(a,b); + int diff = depth[a] - depth[b]; + for(int k = 0; k <= LOGN; k++){ + if(diff & (1<= 0; k--){ + if(up[a][k] != up[b][k]){ + a = up[a][k]; + b = up[b][k]; + } + } + return up[a][0]; + } + + ll dist(int a, int b) + { + int c = lca(a,b); + return distRoot[a] + distRoot[b] - 2*distRoot[c]; + } + + ll stepUp(int u, int k) { + for (int i=LOGN; i>=0; i--) { + if ((k>>i)&1) { + u = up[u][i]; + } + } + return u; + } + + void solve(vector>& edges) { + for (auto& edge: edges) + { + int u = edge[0], v = edge[1], w = edge[2]; + adj[u].push_back({v,w}); + adj[v].push_back({u,w}); + } + + depth[0] = 0; + distRoot[0] = 0; + dfs(0, 0); + + for(int k = 1; k <= LOGN; k++) { + for(int v = 0; v < n; v++) { + up[v][k] = up[up[v][k-1]][k-1]; + } + } + + // Solve your problem. 
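+        // Example usage (hypothetical node indices u, v and step count k; `n` is
+        // assumed to be set to the number of tree nodes before solve() is called):
+        //   int c   = lca(u, v);     // lowest common ancestor of u and v
+        //   ll  d   = dist(u, v);    // weighted distance between u and v
+        //   ll  anc = stepUp(u, k);  // node that is k edges above u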
+ } diff --git a/Template/Bit_manipulation/Count_bit_1_numbers.cpp b/Template/Bit_manipulation/Count_bit_1_numbers.cpp index 2a58bea73..f7c977484 100644 --- a/Template/Bit_manipulation/Count_bit_1_numbers.cpp +++ b/Template/Bit_manipulation/Count_bit_1_numbers.cpp @@ -1 +1,3 @@ __builtin_popcount(state) + +__builtin_popcountll(state) diff --git a/Template/CPP_LANG/Struct.cpp b/Template/CPP_LANG/Struct.cpp new file mode 100644 index 000000000..903b99e08 --- /dev/null +++ b/Template/CPP_LANG/Struct.cpp @@ -0,0 +1,10 @@ +struct State { + int mask, stage; + double dist; + bool operator<(State const& o) const { + return dist > o.dist; + } +}; + +// The top element is the one with smallest dist. +priority_queuepq; diff --git a/Template/CPP_LANG/fill.cpp b/Template/CPP_LANG/fill.cpp new file mode 100644 index 000000000..303c15b99 --- /dev/null +++ b/Template/CPP_LANG/fill.cpp @@ -0,0 +1,10 @@ +main() { + int arr[10]; + fill(arr, arr+10, 3); + + int arr2[2][5]; + fill(&arr2[0][0], &arr2[0][0]+10, 3); + + vectorarr3(10); + fill(arr3.begin(), arr3.end(), 3); +} diff --git a/Template/Inverse_Element/Inverse_Element.cpp b/Template/Inverse_Element/Inverse_Element.cpp index 7b30a1420..1d2ae4f3b 100644 --- a/Template/Inverse_Element/Inverse_Element.cpp +++ b/Template/Inverse_Element/Inverse_Element.cpp @@ -1,50 +1,41 @@ #include #define LL long long using namespace std; -const LL N = 1e6+7, mod = 998244353; +const LL N = 1e6+7, MOD = 998244353; /*********************************/ // Linear method to compute inv[i] -void main() +vectorcompute_inv(int n) { - LL inv[N]; + vectorinv(n+1); inv[1] = 1; - for(int i=2; i>= 1; + if (N <= 0) { + return 1; } - return ret; + long long y = quickPow(x, N / 2) % MOD; + return N % 2 == 0 ? (y * y % MOD) : (y * y % MOD * x % MOD); } -LL inv(LL x) +long long inv(long long x) { - return quickPow(x, mod - 2); + return quickPow(x, MOD - 2); } /*****************************/ -LL inv(int x) +long long compute_inv(int x) { LL s = 1; - for (; x > 1; x = mod%x) - s = s*(mod-mod/x)%mod; + for (; x > 1; x = MOD%x) + s = s*(MOD-MOD/x)%MOD; return s; } diff --git a/Template/Math/Combination-Number.cpp b/Template/Math/Combination-Number.cpp new file mode 100644 index 000000000..7d71ee0ee --- /dev/null +++ b/Template/Math/Combination-Number.cpp @@ -0,0 +1,116 @@ +using LL = long long; + +/*********************************/ +// Version 1: compute all C(n,m) saved in comb +long long comb[1000][1000]; +for (int i = 0; i <= n; ++i) +{ + comb[i][i] = comb[i][0] = 1; + if (i==0) continue; + for (int j = 1; j < i; ++j) + { + comb[i][j] = comb[i - 1][j - 1] + comb[i - 1][j]; + } +} + +/*********************************/ +// Version 1.5: compute all C(n,m) saved in comb with maximum space capability +int comb[5001][2501]; +int getComb(int m, int n) +{ + if (m factorial; + +vector GetFactorial(LL N) +{ + vectorrets(N+1); + rets[0] = 1; + for (int i=1; i<=N; i++) + rets[i] = rets[i-1] * i % M; + return rets; +} + +long long quickPow(long long x, long long N) { + if (N == 0) { + return 1; + } + LL y = quickPow(x, N / 2) % M; + return N % 2 == 0 ? 
(y * y % M) : (y * y % M * x % M); +} + +LL comb(LL m, LL n) +{ + if (n>m) return 0; + LL a = factorial[m]; + LL b = factorial[n] * factorial[m-n] % M; + LL inv_b = quickPow(b, (M-2)); + + return a * inv_b % M; +} + +/*********************************/ +// Version 4: Compute C(m,n) on demand with module M +const LL MOD = 1e9 + 7; +vector factorial; +vector GetFactorial(LL N) +{ + vectorrets(N+1); + rets[0] = 1; + for (int i=1; i<=N; i++) + rets[i] = rets[i-1] * i % MOD; + return rets; +} + +long long quickPow(long long x, long long N) { + if (N == 0) { + return 1; + } + LL y = quickPow(x, N / 2) % MOD; + return N % 2 == 0 ? (y * y % MOD) : (y * y % MOD * x % MOD); +} + +LL comb(LL m, LL n) +{ + if (n>m) return 0; + LL a = factorial[m]; + LL b = factorial[n] * factorial[m-n] % MOD; + LL inv_b = quickPow(b, (MOD-2)); + + return a * inv_b % MOD; +} diff --git a/Template/Math/Lucas.cpp b/Template/Math/Lucas.cpp new file mode 100644 index 000000000..27c293bd0 --- /dev/null +++ b/Template/Math/Lucas.cpp @@ -0,0 +1,20 @@ +/* + To compute C(n,m) % p, when p is a small prime (and we cannot use Fermat's Little Theorem) + https://oi-wiki.org/math/number-theory/lucas/ +*/ + +long long Lucas(long long n, long long m, long long p) { + if (m == 0) return 1; + return (C(n % p, m % p) * Lucas(n / p, m / p, p)) % p; +} + +long long C(int n, int m) +{ + long long cnt = 1; + for(int i = 0; i < m; i++) + { + cnt *= n - i; + cnt /= i + 1; + } + return cnt; +} diff --git a/Template/Math/Primes.cpp b/Template/Math/Primes.cpp index d3e1ac207..9c8f08d03 100644 --- a/Template/Math/Primes.cpp +++ b/Template/Math/Primes.cpp @@ -1,20 +1,18 @@ // Find all primes <= n. vectorEratosthenes(int n) { - auto q=vector(n+1,0); + vectorq(n+1,0); + vectorprimes; for (int i=2; i<=sqrt(n); i++) { - if (q[i]==0) + if (q[i]==1) continue; + int j=i*2; + while (j<=n) { - int j=i*2; - while (j<=n) - { - q[j]=1; - j+=i; - } + q[j]=1; + j+=i; } - } - vectorprimes; + } for (int i=2; i<=n; i++) { if (q[i]==0) diff --git a/Template/Math/QuickPow.cpp b/Template/Math/QuickPow.cpp index e03f18f17..481124e39 100644 --- a/Template/Math/QuickPow.cpp +++ b/Template/Math/QuickPow.cpp @@ -1,5 +1,14 @@ class Solution { +long long MOD = 1e9+7; public: + long long quickMul(long long x, long long N) { + if (N == 0) { + return 1; + } + LL y = quickMul(x, N / 2) % MOD; + return N % 2 == 0 ? (y * y % MOD) : (y * y % MOD * x % MOD); + } + double quickMul(double x, long long N) { if (N == 0) { return 1.0; @@ -8,8 +17,8 @@ class Solution { return N % 2 == 0 ? y * y : y * y * x; } - double myPow(double x, int n) { - long long N = n; - return N >= 0 ? quickMul(x, N) : 1.0 / quickMul(x, -N); + // n can be negative. + double myPow(double x, int n) { + return n >= 0 ? quickMul(x, n) : 1.0 / quickMul(x, -n); } }; diff --git a/Template/SegmentTree/SegmentTree_Basic.cpp b/Template/SegmentTree/SegmentTree_Basic.cpp deleted file mode 100644 index b04fcffe8..000000000 --- a/Template/SegmentTree/SegmentTree_Basic.cpp +++ /dev/null @@ -1,80 +0,0 @@ -// LeetCode 307. 
Range Sum Query - Mutable - -class NumArray { - class SegTreeNode - { - public: - SegTreeNode* left; - SegTreeNode* right; - int start, end; - int info; - SegTreeNode(int a, int b):start(a),end(b),info(0),left(NULL),right(NULL){} - }; - - void init(SegTreeNode* node, int a, int b) // init for range [a,b] - { - if (a==b) - { - node->info = nums[a]; - return; - } - int mid = (a+b)/2; - if (node->left==NULL) - { - node->left = new SegTreeNode(a, mid); - node->right = new SegTreeNode(mid+1, b); - } - init(node->left, a, mid); - init(node->right, mid+1, b); - - node->info = node->left->info + node->right->info; // write your own logic - } - - void updateSingle(SegTreeNode* node, int id, int val) - { - if (id < node->start || id > node->end ) return; - if (node->start == node->end) - { - node->info = val; - return; - } - updateSingle(node->left, id, val); - updateSingle(node->right, id, val); - node->info = node->left->info + node->right->info; // write your own logic - } - - int queryRange(SegTreeNode* node, int a, int b) - { - if (b < node->start || a > node->end ) - { - return 0; // write your own logic - } - if (a <= node->start && b>=node->end) - { - return node->info; // write your own logic - } - return queryRange(node->left, a, b) + queryRange(node->right, a, b); // write your own logic - } - - vector nums; - SegTreeNode* root; - -public: - NumArray(vector nums) - { - this->nums = nums; - root = new SegTreeNode(0, nums.size()-1); - init(root, 0, nums.size()-1); - } - - void update(int i, int val) - { - updateSingle(root, i, val); - } - - int sumRange(int i, int j) - { - return queryRange(root, i, j); - } -}; - diff --git a/Template/SegmentTree/SegmentTree_LazyTag.cpp b/Template/SegmentTree/SegmentTree_LazyTag.cpp deleted file mode 100644 index add214c04..000000000 --- a/Template/SegmentTree/SegmentTree_LazyTag.cpp +++ /dev/null @@ -1,101 +0,0 @@ -// LeetCode 370. 
Range Addition - -class Solution { - class SegTreeNode - { - public: - SegTreeNode* left; - SegTreeNode* right; - int start, end; - int info; - int tag; - SegTreeNode(int a, int b):start(a),end(b),info(0),tag(0),left(NULL),right(NULL){} - }; - - void init(SegTreeNode* node, int a, int b) // init for range [a,b] - { - if (a==b) - { - node->info = 0; - return; - } - int mid = (a+b)/2; - if (node->left==NULL) - { - node->left = new SegTreeNode(a, mid); - node->right = new SegTreeNode(mid+1, b); - } - init(node->left, a, mid); - init(node->right, mid+1, b); - - node->info = node->left->info + node->right->info; // write your own logic - } - - void updateRangeBy(SegTreeNode* node, int a, int b, int val) - { - if (b < node->start || a > node->end ) return; - if (a <= node->start && node->end <=b) - { - // write your own logic - node->info += val * len(node); - node->tag += val; - return; - } - - pushDown(node); - updateRangeBy(node->left, a, b, val); - updateRangeBy(node->right, a, b, val); - - node->info = node->left->info + node->right->info; // write your own logic - } - - int len(SegTreeNode* node) - { - return node->end - node->start + 1; - } - - void pushDown(SegTreeNode* node) - { - if (node->tag!=0) - { - node->left->info += len(node->left) * node->tag; - node->right->info += len(node->right) * node->tag; - node->left->tag += node->tag; - node->right->tag += node->tag; - node->tag = 0; - } - } - - int queryRange(SegTreeNode* node, int a, int b) - { - if (b < node->start || a > node->end ) - { - return 0; // write your own logic - } - if (a <= node->start && b>=node->end) - { - return node->info; // write your own logic - } - pushDown(node); - return queryRange(node->left, a, b) + queryRange(node->right, a, b); // write your own logic - } - - SegTreeNode* root; - -public: - vector getModifiedArray(int length, vector>& updates) - { - SegTreeNode* root = new SegTreeNode(0, length-1); - - init(root, 0, length-1); - - for (auto& update: updates) - { - updateRangeBy(root, update[0], update[1], update[2]); - } - vectorrets(length); - for (int i=0; iinfo = 0; - return; - } - int mid = (a+b)/2; - if (node->left==NULL) - { - node->left = new SegTreeNode(a, mid); - node->right = new SegTreeNode(mid+1, b); - } - init(node->left, a, mid); - init(node->right, mid+1, b); - - node->info = 0; // write your own logic - } - - void updateRange(SegTreeNode* node, int a, int b, int val) - { - if (b < node->start || a > node->end ) // no intersection - return; - if (a <= node->start && node->end <=b) - { - node->info = val; - node->tag = 1; - return; - } - - pushDown(node); - updateRange(node->left, a, b, val); - updateRange(node->right, a, b, val); - - node->info = max(node->left->info, node->right->info); // write your own logic - } - - int queryRange(SegTreeNode* node, int a, int b) - { - if (b < node->start || a > node->end ) - { - return 0; // write your own logic - } - if (a <= node->start && b>=node->end) - { - return node->info; // write your own logic - } - pushDown(node); - return max(queryRange(node->left, a, b), queryRange(node->right, a, b)); // write your own logic - } - - void pushDown(SegTreeNode* node) - { - if (node->tag==true) - { - node->left->info = node->info; - node->right->info = node->info; - node->left->tag = 1; - node->right->tag = 1; - node->tag = 0; - } - } - - -public: - vector fallingSquares(vector>& positions) - { - setSet; - for (auto & rect: positions) - { - Set.insert(rect[0]); - Set.insert(rect[0]+rect[1]); - } - unordered_mappos2idx; - int idx = 0; - for (auto x: Set) - { - 
pos2idx[x] = idx; - idx++; - } - int n = pos2idx.size(); - - SegTreeNode* root = new SegTreeNode(0, n-1); - init(root, 0, n-1); - - int maxH = 0; - vectorrets; - for (auto & rect: positions) - { - int a = pos2idx[rect[0]]; - int b = pos2idx[rect[0]+rect[1]]; - int h = queryRange(root, a, b-1); // [a,b) - updateRange(root, a, b-1, h + rect[1]); - maxH = max(maxH, h + rect[1]); - rets.push_back(maxH); - } - return rets; - } -}; diff --git a/Template/SegmentTree/range_bitwise_and.cpp b/Template/SegmentTree/range_bitwise_and.cpp new file mode 100644 index 000000000..ccc023344 --- /dev/null +++ b/Template/SegmentTree/range_bitwise_and.cpp @@ -0,0 +1,68 @@ +class SegmentTree { +private: + vector tree; + int n; + + void build(vector& nums, int node, int start, int end) + { + if (start == end) { + tree[node] = nums[start]; + } else { + int mid = (start + end) / 2; + build(nums, 2 * node, start, mid); + build(nums, 2 * node + 1, mid + 1, end); + tree[node] = tree[2 * node] & tree[2 * node + 1]; + } + } + + void update(int node, int start, int end, int L, int R, int val) + { + if (R < start || end < L) { + return; + } + if (L <= start && end <= R) { + tree[node] = val; + return; + } + int mid = (start + end) / 2; + update(2 * node, start, mid, L, R, val); + update(2 * node + 1, mid + 1, end, L, R, val); + tree[node] = tree[2 * node] & tree[2 * node + 1]; + } + + int query(int node, int start, int end, int L, int R) + { + if (R < start || end < L) { + return INT_MAX; // Identity for AND operation (all bits set) + } + if (L <= start && end <= R) { + return tree[node]; + } + int mid = (start + end) / 2; + int leftAnd = query(2 * node, start, mid, L, R); + int rightAnd = query(2 * node + 1, mid + 1, end, L, R); + return leftAnd & rightAnd; + } + +public: + SegmentTree(vector& nums) { + n = nums.size(); + tree.resize(4 * n, 0); + build(nums, 1, 0, n - 1); + } + + void rangeUpdate(int L, int R, int val) { + update(1, 0, n - 1, L, R, val); + } + + int rangeAnd(int L, int R) { + return query(1, 0, n - 1, L, R); + } +}; + +int main() +{ + int n = nums.size(); + SegmentTree segTree(nums); + int ret = segTree.rangeAnd(a, b); +} diff --git a/Template/SegmentTree/range_max.cpp b/Template/SegmentTree/range_max.cpp new file mode 100644 index 000000000..e0d6fe36c --- /dev/null +++ b/Template/SegmentTree/range_max.cpp @@ -0,0 +1,117 @@ +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + info = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = max(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; 
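+        // [a,b] overlaps this node: handle complete coverage right below, otherwise
+        // push the lazy tag down and recurse into both children.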
+ if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = max(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MIN/2; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = max(left->queryRange(a, b), right->queryRange(a, b)); + info = max(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +int main() +{ + SegTreeNode* root = new SegTreeNode(0, length-1, initVals); // Set the leaf nodes with initVals. + + for (auto& update: updates) + { + int start = update[0], end = update[1], val = update[2]; + root->updateRange(start, end ,val); // set the range [start, end] with val + } + + vectorrets(length); + for (int i=0; iqueryRange(i, i); // get single node val + return rets; +} diff --git a/Template/SegmentTree/range_min.cpp b/Template/SegmentTree/range_min.cpp new file mode 100644 index 000000000..5d39bde09 --- /dev/null +++ b/Template/SegmentTree/range_min.cpp @@ -0,0 +1,117 @@ +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + int info; // the maximum value of the range + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = min(left->info, right->info); // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + info = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = min(left->info, right->info); // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info = info; + right->info = info; + left->tag = 1; + right->tag = 1; + tag = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = min(left->info, right->info); // write your own logic + } + } + + int queryRange(int a, int b) // query the maximum value within range [a,b] + { + if (b < start || a > end ) + { + return INT_MAX/2; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + int ret = min(left->queryRange(a, b), right->queryRange(a, b)); + info = min(left->info, right->info); // check with your own logic + return ret; + } + + return info; // should not reach here + } + +}; + +int main() +{ + SegTreeNode* root = new SegTreeNode(0, length-1, initVals); // Set the leaf nodes with initVals. 
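+    // `length`, `initVals` and `updates` are assumed to be defined by the
+    // surrounding problem code; this main() is only a usage sketch.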
+ + for (auto& update: updates) + { + int start = update[0], end = update[1], val = update[2]; + root->updateRange(start, end ,val); // set the range [start, end] with val + } + + vectorrets(length); + for (int i=0; iqueryRange(i, i); // get single node val + return rets; +} diff --git a/Template/SegmentTree/range_module.cpp b/Template/SegmentTree/range_module.cpp new file mode 100644 index 000000000..98b214af6 --- /dev/null +++ b/Template/SegmentTree/range_module.cpp @@ -0,0 +1,90 @@ +// 支持动态增减节点 + +class SegTree +{ + public: + int start, end; + bool status; + SegTree* left; + SegTree* right; + SegTree(int a, int b, bool T):start(a),end(b),status(T),left(NULL),right(NULL){} + + void remove(SegTree* &node) + { + if (node==NULL) return; + remove(node->left); + remove(node->right); + delete node; + node = NULL; + return; + } + + void setStatus(int a, int b, bool T) + { + if (a<=start && b>=end) // bottom condition 1: [a,b)>[start,end) + { + remove(left); + remove(right); + status = T; + return; + } + if (a>=end || b<=start) // bottom condition 2: [a,b) do not intersect with [start,end) + return; + int mid = start+(end-start)/2; + if (left==NULL) // no children? create them! + { + left = new SegTree(start,mid,status); + right = new SegTree(mid,end,status); + } + left->setStatus(a,b,T); + right->setStatus(a,b,T); + status =left->status && right->status; + } + + bool getStatus(int a, int b) + { + if (a<=start && b>=end) // bottom condition 1: [a,b)>[start,end) + return status; + if (a>=end || b<=start) // bottom condition 2: [a,b) do not intersect with [start,end) + return true; + if (left==NULL) + return status; + int mid = start+(end-start)/2; + bool L = left->getStatus(a,b); + bool R = right->getStatus(a,b); + return L&&R; + } +}; + +class RangeModule { +public: + + SegTree root = SegTree(0,1e9,false); + + RangeModule() + { + } + + void addRange(int left, int right) + { + root.setStatus(left,right,true); + } + + bool queryRange(int left, int right) + { + return root.getStatus(left,right); + } + + void removeRange(int left, int right) + { + root.setStatus(left,right,false); + } +}; + +/** + * Your RangeModule object will be instantiated and called as such: + * RangeModule obj = new RangeModule(); + * obj.addRange(left,right); + * bool param_2 = obj.queryRange(left,right); + * obj.removeRange(left,right); + */ diff --git a/Template/SegmentTree/range_sum.cpp b/Template/SegmentTree/range_sum.cpp new file mode 100644 index 000000000..4ebcb2d98 --- /dev/null +++ b/Template/SegmentTree/range_sum.cpp @@ -0,0 +1,121 @@ +using LL = long long; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + bool lazy_tag; + LL lazy_val; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + lazy_tag = 0; + lazy_val = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if 
(lazy_tag==1 && left) + { + left->info = lazy_val * (left->end - left->start + 1); + right->info = lazy_val * (right->end - right->start + 1); + left->lazy_tag = 1; left->lazy_val = lazy_val; + right->lazy_tag = 1; right->lazy_val = lazy_val; + lazy_tag = 0; lazy_val = 0; + } + } + + void updateRange(int a, int b, int val) // set range [a,b] with val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info = val * (end-start+1); + lazy_tag = 1; + lazy_val = val; + return; + } + + if (left) + { + pushDown(); + left->updateRange(a, b, val); + right->updateRange(a, b, val); + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum over range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +int main() +{ + SegTreeNode* root = new SegTreeNode(0, length-1, initVals); // Set the leaf nodes with initVals. + + for (auto& update: updates) + { + int start = update[0], end = update[1], val = update[2]; + root->updateRange(start, end ,val); // set the range [start, end] with val + } + + for (auto& query: queries) + { + int start = query[0], end = query[1]; + ret[i] = root->queryRange(start, end); // get the range sum over [start, end] + } +} diff --git a/Template/SegmentTree/range_sum_increase_by.cpp b/Template/SegmentTree/range_sum_increase_by.cpp new file mode 100644 index 000000000..92bba4ce0 --- /dev/null +++ b/Template/SegmentTree/range_sum_increase_by.cpp @@ -0,0 +1,127 @@ +using LL = long long; +LL M = 1e9+7; +class SegTreeNode +{ + public: + SegTreeNode* left = NULL; + SegTreeNode* right = NULL; + int start, end; + LL info; // the sum value over the range + LL delta; + bool tag; + + SegTreeNode(int a, int b, int val) // init for range [a,b] with val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + SegTreeNode(int a, int b, vector& val) // init for range [a,b] with the same-size array val + { + tag = 0; + delta = 0; + start = a, end = b; + if (a==b) + { + info = val[a]; + return; + } + int mid = (a+b)/2; + if (left==NULL) + { + left = new SegTreeNode(a, mid, val); + right = new SegTreeNode(mid+1, b, val); + info = left->info + right->info; // check with your own logic + } + } + + void pushDown() + { + if (tag==1 && left) + { + left->info += delta * (left->end - left->start + 1); + left->delta += delta; + right->info += delta * (right->end - right->start + 1); + right->delta += delta; + left->tag = 1; + right->tag = 1; + tag = 0; + delta = 0; + } + } + + void updateRangeBy(int a, int b, int val) // increase range [a,b] by val + { + if (b < start || a > end ) // not covered by [a,b] at all + return; + if (a <= start && end <=b) // completely covered within [a,b] + { + info += val * (end-start+1); + delta += val; + tag = 1; + return; + } + + if (left) + { + pushDown(); + left->updateRangeBy(a, b, val+delta); + right->updateRangeBy(a, b, val+delta); + delta = 0; + tag = 
0; + info = left->info + right->info; // write your own logic + } + } + + LL queryRange(int a, int b) // query the sum within range [a,b] + { + if (b < start || a > end ) + { + return 0; // check with your own logic + } + if (a <= start && end <=b) + { + return info; // check with your own logic + } + + if (left) + { + pushDown(); + LL ret = left->queryRange(a, b) + right->queryRange(a, b); + info = left->info + right->info; // check with your own logic + return ret; + } + + return info; // should not reach here + } +}; + +int main() +{ + SegTreeNode* root = new SegTreeNode(0, length-1, initVals); // Set the leaf nodes with initVals. + + for (auto& update: updates) + { + int start = update[0], end = update[1], val = update[2]; + root->updateRange(start, end ,val); // increase the range [start, end] by val + } + + for (auto& query: queries) + { + int start = query[0], end = query[1]; + ret[i] = root->queryRange(start, end); // get the range sum over [start, end] + } +} diff --git a/Template/Union_Find/union_find.cpp b/Template/Union_Find/union_find.cpp new file mode 100644 index 000000000..ff864f0e7 --- /dev/null +++ b/Template/Union_Find/union_find.cpp @@ -0,0 +1,23 @@ +struct DSU { + vector p, r; + DSU(int n): p(n,-1), r(n,0) { + iota(p.begin(), p.end(), 0); + } + int find(int x) { + return p[x]==x ? x : p[x]=find(p[x]); + } + bool unite(int a, int b) { + a = find(a); b = find(b); + if (a == b) return false; + if (r[a] < r[b]) swap(a,b); + p[b] = a; + if (r[a]==r[b]) ++r[a]; + return true; + } +}; + +int main { + DSU dsu(n); + dsu.unite(u,v); + if (dsu.find(u)==dsu.find(v) {} +} diff --git a/Thinking/2860.Happy-Students/2860.Happy-Students.cpp b/Thinking/2860.Happy-Students/2860.Happy-Students.cpp new file mode 100644 index 000000000..f69cd1bbe --- /dev/null +++ b/Thinking/2860.Happy-Students/2860.Happy-Students.cpp @@ -0,0 +1,22 @@ +class Solution { +public: + int countWays(vector& nums) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + + int ret = 0; + for (int i=0; i+1 nums[i]) && (i+1 < nums[i+1])) + ret++; + } + + if (0 < nums[0]) + ret++; + if (n > nums[n-1]) + ret++; + + return ret; + } +}; diff --git a/Thinking/2860.Happy-Students/Readme.md b/Thinking/2860.Happy-Students/Readme.md new file mode 100644 index 000000000..9c9f9d793 --- /dev/null +++ b/Thinking/2860.Happy-Students/Readme.md @@ -0,0 +1,9 @@ +### 2860.Happy-Students + +我们发现,将nums按照从小到大排序后,如果第i个同学选中并且happy,那么比他小的同学必须选中才能happy。如果第j个同学没选中并且happy,那么比他大的同学也一定要不被选中才能happy。 + +因为所有的同学都happy,这就告诉我们,所有选中的同学必然是相邻的,所有没有选中的同学必然是相邻的。所以我们需要找到这个分界点。只需要遍历所有的间隔位置,判断如果左边选中、右边不选中,是否能够满足让他们两个happy(其他人自然自动满足)。 + +注意这样的分界点没有连续性,它可能离散地出现在任何位置。 + +此外注意全部选中和全部不选两种特殊情况。 diff --git a/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices.cpp b/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices.cpp new file mode 100644 index 000000000..e46a3de45 --- /dev/null +++ b/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices.cpp @@ -0,0 +1,21 @@ +using LL = long long; +class Solution { +public: + long long maximumSum(vector& nums) + { + int n = nums.size(); + + int k = 1; + LL ret = 0; + while (k<=n) + { + LL sum = 0; + for (int i=1; k*i*i<=n; i++) + sum += nums[k*i*i-1]; + ret = max(ret, sum); + k++; + } + + return ret; + } +}; diff --git a/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices/Readme.md 
b/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices/Readme.md new file mode 100644 index 000000000..00d2b3b4e --- /dev/null +++ b/Thinking/2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices/Readme.md @@ -0,0 +1,15 @@ +### 2862.Maximum-Element-Sum-of-a-Complete-Subset-of-Indices + +我们分析一下“任意两个下标i与j的乘积是完全平方数”的含义。我们将i分解为`i=a*x^2`,其中x^2是i里包含的最大平方因子。同理,分解`j=b*y^2`。为了使得```i*j```依然是平方数,那么必然要求`a==b`. 同理,与{i,j}属于同一个集合里的其他下标元素,必然也必须能分解为`a*z^2`的形式。 + +所以为了最大化这个集合(不仅指集合元素的数目,也指element-sum),集合元素里的那些“最大平方因子”必然是`1^2, 2^2, 3^3, 4^2 ... `直至n。然后我们再穷举`a=1,2,3...`. 就可以构造出所有可能的最优集合,即 + +```1*1, 1*4,1*9, 1*16, ...``` + +```2*1, 2*4,2*9, 2*16, ...``` + +```3*1, 3*4,3*9, 3*16, ...``` + +直至集合最小元素的上限是n。 + +那么我们穷举这些元素的时间复杂度是多少呢?对于`*1`而言,我们穷举了n次。对于`*4`而言,我们穷举了n/4次。对于`*9`而言,我们穷举了n/9次。所以总的穷举数目为`n/1 + n/4 + n/9 + ...`,它是和小于2n的序列。故总的时间复杂度是o(N). diff --git a/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment.cpp b/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment.cpp new file mode 100644 index 000000000..811db33dc --- /dev/null +++ b/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment.cpp @@ -0,0 +1,36 @@ +class Solution { +public: + int minGroupsForValidAssignment(vector& nums) + { + int n = nums.size(); + unordered_mapMap; + for (int x: nums) Map[x]++; + + int m = INT_MAX; + for (auto [_, x]: Map) + m = min(m, x); + + for (int k=m+1; k>=1; k--) + { + int count = 0; + for (auto [_, x]: Map) + { + int q = x / k; + int r = x % k; + if (r==0 || k-r <= q+1) + { + count += ceil(x*1.0 / k); + } + else + { + count = -1; + break; + } + + } + if (count != -1) return count; + } + + return 0; + } +}; diff --git a/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment/Readme.md b/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment/Readme.md new file mode 100644 index 000000000..326690df2 --- /dev/null +++ b/Thinking/2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment/Readme.md @@ -0,0 +1,11 @@ +### 2910.Minimum-Number-of-Groups-to-Create-a-Valid-Assignment + +我们首先将所有元素的频率收集起来放入一个数组arr。本题就是将arr里的每个元素都做拆分,要求最多只能拆分出两种相邻的数字(记做k和k-1)。求最少能拆分出多少个数字来。 + +本题的一个坑就是二分搜索是不成立的。这是说拆分的越多就越容易,这里没有单调性。比如说,[10,20]可以拆分出k=10, 即[10,10,10];但不能拆分出k=9;但是又可以拆分出k=5,即[5,5,5,5,5,5]. + +本题的解法其实就是暴力尝试。假设arr的长度是n,出现最小的频次是m,那么我们就从`k=m+1,m,...,1`逐个尝试,找到最大的k使得所有arr的元素都能成功拆分成若干个k或k-1的和。这样的时间复杂度看上去是0(mn). 事实上mn有制约关系,如果nums的种类各不相同,那么m就是1,n就是1e5;如果nums的种类完全相同,那么m就是1e5,n就是1. 事实上,o(mn)就是1e5数量级。 + +接下来我们考虑,如果给定了k,如何判定某个arr的元素x能成功拆封成若干个k或k-1之和?我们将x尽量拆分出最多的k来,得到`q = x/k`个group,以及余数`r = x%k`. 此时我们还差`k-r`才能凑出一个k。如果`k-r<=q+1`,意味着我们从之前的这些group里各自都拆借1加到最后一个“落单”的group,而那些变动的group里含有的元素就是`k-1`,依然符合要求. 
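+
+举个例子(仅作示意):当`x=10, k=4`时,`q=2, r=2`,`k-r=2 <= q+1=3`,可拆成`4+3+3`,组数恰为`ceil(10/4)=3`;而当`x=10, k=9`时,`q=1, r=1`,`k-r=8 > q+1=2`,10无法写成若干个9或8之和,拆分失败。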
注意,如果`r==0`时,需要另外处理这个corner case。 + +最终当我们找到最大的k,使得所有arr的x都能成立时,我们用`ceil(x*1.0/k)`即可计算出总共分成的group的数目。 diff --git a/Thinking/2939.Maximum-Xor-Product/2939.Maximum-Xor-Product.cpp b/Thinking/2939.Maximum-Xor-Product/2939.Maximum-Xor-Product.cpp new file mode 100644 index 000000000..d1cba2eb6 --- /dev/null +++ b/Thinking/2939.Maximum-Xor-Product/2939.Maximum-Xor-Product.cpp @@ -0,0 +1,63 @@ +using LL = long long; +LL M = 1e9+7; +class Solution { +public: + int maximumXorProduct(long long a, long long b, int n) + { + LL A = ((a>>n)<>n)<=0; k--) + { + LL bit1 = ((a>>k)&1LL); + LL bit2 = ((b>>k)&1LL); + if (bit1==bit2) + { + a = a - (bit1<B) + { + for (int k=n-1; k>=0; k--) + { + LL bit1 = ((a>>k)&1LL); + LL bit2 = ((b>>k)&1LL); + if (bit1==bit2) + { + a = a - (bit1<& coins, int target) + { + sort(coins.begin(), coins.end()); + int limit = 0; + int i = 0; + int ret = 0; + while (limit < target) + { + if (i==coins.size() || limit+1 < coins[i]) + { + ret++; + limit += limit+1; + } + else + { + limit += coins[i]; + i++; + } + } + + return ret; + } +}; diff --git a/Thinking/2952.Minimum-Number-of-Coins-to-be-Added/Readme.md b/Thinking/2952.Minimum-Number-of-Coins-to-be-Added/Readme.md new file mode 100644 index 000000000..e89c2d779 --- /dev/null +++ b/Thinking/2952.Minimum-Number-of-Coins-to-be-Added/Readme.md @@ -0,0 +1,5 @@ +### 2952.Minimum-Number-of-Coins-to-be-Added + +将所有的coins排序后,假设当前已有的硬币能够组成任意[0, limit]之间的面额,那么又得到一枚面值是x的硬币,此时能够组成多少种面额呢?显然,当新硬币不用时,我们依然能构造出[0, limit];当使用新硬币时,我们可以构造出[x, limit+x]。当这两段区间不连接时,即`limit+1=x`时,则意味着我们可以构造出任意[0,limit+x]区间内的面额。 + +此题和`330.Patching-Array`和`1798.Maximum-Number-of-Consecutive-Values-You-Can-Make`非常相似。 diff --git a/Thinking/2957.Remove-Adjacent-Almost-Equal-Characters/2957.Remove-Adjacent-Almost-Equal-Characters.cpp b/Thinking/2957.Remove-Adjacent-Almost-Equal-Characters/2957.Remove-Adjacent-Almost-Equal-Characters.cpp new file mode 100644 index 000000000..fdb7f18cd --- /dev/null +++ b/Thinking/2957.Remove-Adjacent-Almost-Equal-Characters/2957.Remove-Adjacent-Almost-Equal-Characters.cpp @@ -0,0 +1,18 @@ +class Solution { +public: + int removeAlmostEqualCharacters(string word) + { + int n = word.size(); + int ret = 0; + for (int i=0; i tx || sy > ty) return -1; + if (sx == tx && sy == ty) return 0; + + if (tx < ty) + return minMoves(sy, sx, ty, tx); + + if (tx==ty) { + int temp1 = minMoves(sy, sx, 0, ty); + if (temp1!=-1) return temp1+1; + + int temp2 = minMoves(sy, sx, tx, 0); + if (temp2!=-1) return temp2+1; + + return -1; + } + + if (tx > 2*ty) { + if (tx%2==1) return -1; + int temp = minMoves(sx, sy, tx/2, ty); + if (temp==-1) return -1; + else return temp+1; + } else { + int temp = minMoves(sx, sy, tx-ty, ty); + if (temp==-1) return -1; + else return temp+1; + } + + return -1; + } +}; diff --git a/Thinking/3609.Minimum-Moves-to-Reach-Target-in-Grid/Readme.md b/Thinking/3609.Minimum-Moves-to-Reach-Target-in-Grid/Readme.md new file mode 100644 index 000000000..b53881186 --- /dev/null +++ b/Thinking/3609.Minimum-Moves-to-Reach-Target-in-Grid/Readme.md @@ -0,0 +1,19 @@ +### 3609.Minimum-Moves-to-Reach-Target-in-Grid + +注意到sx,sy和tx,ty都是正数,所有的操作只能单向地使得数值变大。 + +我们思考(sx,sy)变成(tx,ty)的过程中的最后一步,即从(a,b)->(tx,ty)。因为增量`m=max(a,b)`很大,所以无论是第一个分量还是第二个分量,加上m之后都会大于另一个。所以我们从tx和ty的大小比较中就可以知道,最后一次操作是作用在了哪个分量上面。这里我们分情况讨论. + +如果tx>ty,那么显然最后一次操作是加在了第一个分量上面。即`tx=a+max(a,b), ty=b`。继续分类讨论 +1. 如果max(a,b)=a,则有tx=2a,继而得到`a=tx/2, b=ty`,这等价于`tx>=2*ty`. 也就是说,在此情况下,(tx,ty)的前一步必然是(tx/2,ty)。于是我们发现如果tx不能被2整除,那么前一步是不存在的;反之我们可以递归处理成`minMoves(sx,sy,tx/2,ty)+1`即可. + +2. 
如果max(a,b)=a,则有tx=a+b,继而得到`a=tx-ty, b=ty`这等价于`tx<2*ty`. 在此情况下,(tx,ty)的前一步必然是(tx-ty,ty)。于是我们可以递归处理成`minMoves(sx,sy,tx-ty,ty)+1`即可. + +如果tx(x,x)。继续分类讨论: +1. 如果操作是作用于第一个分量上面,`x=a+max(a,b), x=b`,必然有a==0. 继而b=x,原题转化为`minMoves(sx,sy,0,x)+1` + +2. 如果操作是作用于第二个分量上面,`x=a, x=b+max(a,b)`,必然有b==0. 继而a=x,原题转化为`minMoves(sx,sy,x,0)+1` + +最终的边界条件就是`sx==tx && sy==ty`时,返回0. 如果tx和ty的任意分量小于sx和sy,那么返回-1. diff --git a/Thinking/3644.Maximum-K-to-Sort-a-Permutation/3644.Maximum-K-to-Sort-a-Permutation.cpp b/Thinking/3644.Maximum-K-to-Sort-a-Permutation/3644.Maximum-K-to-Sort-a-Permutation.cpp new file mode 100644 index 000000000..cafe46d5e --- /dev/null +++ b/Thinking/3644.Maximum-K-to-Sort-a-Permutation/3644.Maximum-K-to-Sort-a-Permutation.cpp @@ -0,0 +1,21 @@ +class Solution { +public: + int sortPermutation(vector& nums) { + int required_mask = -1; + bool is_sorted = true; + int n = nums.size(); + + for (int i = 0; i < n; ++i) { + if (nums[i] != i) { + is_sorted = false; + required_mask &= i; + } + } + + if (is_sorted) { + return 0; + }else { + return required_mask; + } + } +}; diff --git a/Thinking/3644.Maximum-K-to-Sort-a-Permutation/Readme.md b/Thinking/3644.Maximum-K-to-Sort-a-Permutation/Readme.md new file mode 100644 index 000000000..e69a3b14c --- /dev/null +++ b/Thinking/3644.Maximum-K-to-Sort-a-Permutation/Readme.md @@ -0,0 +1,7 @@ +### 3644.Maximum-K-to-Sort-a-Permutation + +这道题的第一反应是一定会有解吗?事实上本题的关键在于nums的值是0到n-1的排列。如果我们想到,0与任何数字的位与都是0,因此我们可以通过0与任意数字的交换来实现数组的重排序。举个例子,如果当前0是在index=4上,我们就寻找数组里的5并假设其在index=p的位置。通过(4,p)的交换,我们就把5送到了它应该去的位置(即idx=4),而把0送到了另一个p的位置。由此重复类似的操作。故k=0必然是一个解。 + +既然一定有解,那么最优解是什么呢?我们只需要考虑那些不在期望位置、必须重排的元素,记作v1,v2,...,vt。答案就是令k为这些元素的bitwise AND的结果。为什么这么神奇呢?首先k一定比这些v都要小,故k一定是落在[0,n-1]之间的。其次,我们发现v1&k, v2&k, ..., vt&k的结果一定都是k。所以我们只需要寻找nums的k所在的那个元素,把它类比于上述的0,就可以实现nums里对v1,v2,..,vt的任意重排。 + +有没有比k更好的答案了呢?不会,如果我们的交换涉及到比上述vi更多的元素,那么得到的bitwise AND的结果一定更小,结果也就一定比k更差。 diff --git a/Thinking/3660.Jump-Game-IX/3660.Jump-Game-IX.cpp b/Thinking/3660.Jump-Game-IX/3660.Jump-Game-IX.cpp new file mode 100644 index 000000000..ce95f215a --- /dev/null +++ b/Thinking/3660.Jump-Game-IX/3660.Jump-Game-IX.cpp @@ -0,0 +1,23 @@ +class Solution { +public: + vector maxValue(vector& nums) { + int n = nums.size(); + vectorpreMax(n); + vectorsufMin(n); + for (int i=0; i=0; i--) + sufMin[i] = min((i==n-1)?INT_MAX:sufMin[i+1], nums[i]); + + vectorrets(n); + rets[n-1] = preMax[n-1]; + for (int i=n-2; i>=0; i--) { + if (preMax[i]>sufMin[i+1]) + rets[i] = rets[i+1]; + else + rets[i] = preMax[i]; + } + + return rets; + } +}; diff --git a/Thinking/3660.Jump-Game-IX/Readme.md b/Thinking/3660.Jump-Game-IX/Readme.md new file mode 100644 index 000000000..31b1a3df6 --- /dev/null +++ b/Thinking/3660.Jump-Game-IX/Readme.md @@ -0,0 +1,11 @@ +### 3660.Jump-Game-IX + +此题乍看是个图论的问题,彼此可以跳转的点可以认为是联通的。但是构建所有的边需要n^2的复杂度。 + +我们定义preMax[i]表示前i个元素(包括自身)里的最大值。考察任意的rets[i],如果答案只在左边的话,那么答案就是preMax[i]。但是也有可能答案在右边。从i能往右跳转到哪些地方呢?我们势必会先借助于preMax[i],因为从preMax[i]跳转的话可以最大范围地覆盖到[i+1:n-1]里的可跳转区域。此时两种情况: + +1. 如果`preMax[i]sufMin[i+1]`,那么我们可以有这样一条跳转路径:i->preMax[i]->sufMin[i]->i+1. 最后一步跳转的依据是:根据定义,sufMin[i]是[i+1:n-1]里的最小值,必然小于等于nums[i+1].由此可知i与i+1是联通的,必然有`rets[i]=rets[i+1]`. 
+ +由此我们发现了rets从后往前的递推关系。因为rets[n-1]必然等于preMax[n-1],由此可以根据以上的结论,依次推出i=n-2,n-1,...,0的答案。 diff --git a/Tree/099.Recover-Binary-Search-Tree/Readme.md b/Tree/099.Recover-Binary-Search-Tree/Readme.md index f85c3a72c..30b27bc3b 100644 --- a/Tree/099.Recover-Binary-Search-Tree/Readme.md +++ b/Tree/099.Recover-Binary-Search-Tree/Readme.md @@ -1,8 +1,8 @@ ### 099.Recover-Binary-Search-Tree -因为是BST,所以按先序遍历访问下来应该是一个递增的数列。如果一个递增的数列里出现两个数字的对调,那么会有两个尖峰。显然,第一个尖峰的顶和第二个尖峰的谷,就是被掉包的那两个数字。 +因为是BST,所以按中序遍历访问下来应该是一个递增的数列。如果一个递增的数列里出现两个数字的对调,那么会有两个尖峰。显然,第一个尖峰的顶和第二个尖峰的谷,就是被掉包的那两个数字。 -本题按先序遍历访问BST(采用DFS递归的方法)。初始化三个公共变量 +本题按中序遍历访问BST(采用DFS递归的方法)。初始化三个公共变量 ```cpp TreeNode* first=NULL; TreeNode* Second=NULL; @@ -17,4 +17,4 @@ TreeNode* CurMax=new TreeNode(INT_MIN); 这里还有一个关键点:如果整个树的两个掉包元素是相邻的,那么整个遍历只会找到一个尖峰。所以这里未雨绸缪的技巧是,在处理第一个尖峰时,同时把第二个掉包元素也设置 second==node. 后续如果找到了第二个尖峰,则second会被覆盖。 -[Leetcode Link](https://leetcode.com/problems/recover-binary-search-tree) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/recover-binary-search-tree) diff --git a/Tree/1522.Diameter-of-N-Ary-Tree/1522.Diameter-of-N-Ary-Tree.cpp b/Tree/1522.Diameter-of-N-Ary-Tree/1522.Diameter-of-N-Ary-Tree.cpp new file mode 100644 index 000000000..b965af975 --- /dev/null +++ b/Tree/1522.Diameter-of-N-Ary-Tree/1522.Diameter-of-N-Ary-Tree.cpp @@ -0,0 +1,56 @@ +/* +// Definition for a Node. +class Node { +public: + int val; + vector children; + + Node() {} + + Node(int _val) { + val = _val; + } + + Node(int _val, vector _children) { + val = _val; + children = _children; + } +}; +*/ + +class Solution { + int ret = 0; +public: + int diameter(Node* root) + { + DFS(root); + return ret-1; + } + + int DFS(Node* node) + { + if (node==NULL) return 0; + vectordepths; + for (auto child: node->children) + depths.push_back(DFS(child)); + sort(depths.rbegin(), depths.rend()); + + int n = depths.size(); + if (n>=2) + { + ret = max(ret, depths[0]+depths[1]+1); + return depths[0]+1; + } + else if (n==1) + { + ret = max(ret, depths[0]+1); + return depths[0]+1; + } + else + { + ret = max(ret, 1); + return 1; + } + + } +}; diff --git a/Tree/1522.Diameter-of-N-Ary-Tree/Readme.md b/Tree/1522.Diameter-of-N-Ary-Tree/Readme.md new file mode 100644 index 000000000..01832c971 --- /dev/null +++ b/Tree/1522.Diameter-of-N-Ary-Tree/Readme.md @@ -0,0 +1,7 @@ +### 1522.Diameter-of-N-Ary-Tree + +本题和```543.Diameter-of-Binary-Tree```一样的思路。在rooted-tree里,任何路径都一定有一个最高的拐点,往左是一条以某个孩子为起点的单链,往右是另一条以某个孩子为起点单链(所谓单链就是一路向下奔向叶子节点、不带拐弯的路径)。显然,我们会取所有孩子节点里最长的两条单链。 + +所以本题的本质就是求以任意节点node为起点的最长单链长度```h(node)```。显然有递推关系:```h(node) = max{h(node->child)}+1```. + +对于任意节点node,我们会找它孩子节点中最长的两条单链,再加1,就是以node为拐点的最长路径。全局的最长路径就是最终答案。 diff --git a/Tree/156.Binary-Tree-Upside-Down/156.Binary Tree Upside Down.cpp b/Tree/156.Binary-Tree-Upside-Down/156.Binary Tree Upside Down.cpp deleted file mode 100644 index e6199ab46..000000000 --- a/Tree/156.Binary-Tree-Upside-Down/156.Binary Tree Upside Down.cpp +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Definition for a binary tree node. 
- * struct TreeNode { - * int val; - * TreeNode *left; - * TreeNode *right; - * TreeNode(int x) : val(x), left(NULL), right(NULL) {} - * }; - */ -class Solution { -public: - TreeNode* upsideDownBinaryTree(TreeNode* root) - { - if (root==NULL) return NULL; - - if (root->left==NULL) - return root; - else - { - TreeNode* nextRoot = root->left; - TreeNode* nextRootAlien = new TreeNode(root->left->val); - nextRootAlien->right = root; - nextRootAlien->left = root->right; - root->left=NULL; - root->right=NULL; - return DFS(nextRoot,nextRootAlien); - } - } - - TreeNode* DFS(TreeNode* root, TreeNode* rootAlien) - { - if (root->left==NULL) - return rootAlien; - else - { - TreeNode* nextRoot = root->left; - TreeNode* nextRootAlien = new TreeNode(root->left->val); - nextRootAlien->right = rootAlien; - nextRootAlien->left = root->right; - return DFS(nextRoot,nextRootAlien); - } - - } -}; diff --git a/Tree/156.Binary-Tree-Upside-Down/156.Binary-Tree-Upside-Down.cpp b/Tree/156.Binary-Tree-Upside-Down/156.Binary-Tree-Upside-Down.cpp new file mode 100644 index 000000000..be8711117 --- /dev/null +++ b/Tree/156.Binary-Tree-Upside-Down/156.Binary-Tree-Upside-Down.cpp @@ -0,0 +1,30 @@ +/** + * Definition for a binary tree node. + * struct TreeNode { + * int val; + * TreeNode *left; + * TreeNode *right; + * TreeNode(int x) : val(x), left(NULL), right(NULL) {} + * }; + */ +class Solution { +public: + TreeNode* upsideDownBinaryTree(TreeNode* root) + { + if (root==NULL) return NULL; + if (root->left==NULL && root->right==NULL) return root; + + TreeNode* head = upsideDownBinaryTree(root->left); + TreeNode* node = head; + + while (node->right!=NULL) + node=node->right; + + node->left = root->right; + node->right = root; + root->left=NULL; + root->right=NULL; + + return head; + } +}; diff --git a/Tree/2246.Longest-Path-With-Different-Adjacent-Characters/2246.Longest-Path-With-Different-Adjacent-Characters.cpp b/Tree/2246.Longest-Path-With-Different-Adjacent-Characters/2246.Longest-Path-With-Different-Adjacent-Characters.cpp new file mode 100644 index 000000000..961ba5cab --- /dev/null +++ b/Tree/2246.Longest-Path-With-Different-Adjacent-Characters/2246.Longest-Path-With-Different-Adjacent-Characters.cpp @@ -0,0 +1,52 @@ +class Solution { + vector children[100000]; + int len[100000]; + int globalRet = 1; + string s; +public: + int longestPath(vector& parent, string s) + { + int n = parent.size(); + this->s = s; + + for (int i=0; itemp; + for (int child: children[node]) + { + dfs(child); + if (s[child]!=s[node]) + { + ret = max(ret, len[child]+1); + temp.push_back(len[child]); + } + } + len[node] = ret; + + sort(temp.rbegin(), temp.rend()); + if (temp.size()>=2) + globalRet = max(globalRet, temp[0]+temp[1]+1); + else if (temp.size()==1) + globalRet = max(globalRet, temp[0]+1); + else + globalRet = max(globalRet, 1); + } +}; diff --git a/Tree/2246.Longest-Path-With-Different-Adjacent-Characters/Readme.md b/Tree/2246.Longest-Path-With-Different-Adjacent-Characters/Readme.md new file mode 100644 index 000000000..830ee9262 --- /dev/null +++ b/Tree/2246.Longest-Path-With-Different-Adjacent-Characters/Readme.md @@ -0,0 +1,9 @@ +### 2246.Longest-Path-With-Different-Adjacent-Characters + +这是一道非常典型的rooted-tree内的路径问题。在给定root的树型图里,任何路径都必然有一个拐点,我们遍历所有的节点,考虑对每个节点作为拐点时的最优路径,这样我们就可以做到不遗漏地求出全局的最优路径。 + +对于本题而言,以Node为拐点的最长路径,必然由两条以它孩子节点为起点的最长单链路径(即一直向下没有拐弯)拼接组成。所以本题的核心就转化为,求对于每个节点,从它往下走能够找到的最长单链路径h。很显然这个函数具有递归性质: +``` +h(node) = h(node->child) +1 (if node is different from its child), otherwise 1 +``` 
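+
+下面给出该递推的一个示意性写法(草稿:`children`表示每个节点的孩子列表,`s`为各节点对应的字符,与上文解法中的同名变量含义一致):
+
+```cpp
+// 返回以node为起点、向下延伸且相邻字符互不相同的最长单链长度
+int dfs(int node, const vector<vector<int>>& children, const string& s) {
+    int best = 1;                            // 至少包含node自身
+    for (int child : children[node]) {
+        int h = dfs(child, children, s);     // 先求得孩子的最长单链
+        if (s[child] != s[node])             // 相邻字符不同才能拼接
+            best = max(best, h + 1);
+    }
+    return best;
+}
+```
+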
+有了h之后,考虑每个节点作为拐点时,转化为能否找到两个可以与自己拼接起来的、孩子的最长单链即可。在全局中找最优解。 diff --git a/Tree/2277.Closest-Node-to-Path-in-Tree/2277.Closest-Node-to-Path-in-Tree_v1.cpp b/Tree/2277.Closest-Node-to-Path-in-Tree/2277.Closest-Node-to-Path-in-Tree_v1.cpp new file mode 100644 index 000000000..9c7753a92 --- /dev/null +++ b/Tree/2277.Closest-Node-to-Path-in-Tree/2277.Closest-Node-to-Path-in-Tree_v1.cpp @@ -0,0 +1,61 @@ +class Solution { + vectornext[1005]; + int matrix[1005][1005]; +public: + vector closestNode(int n, vector>& edges, vector>& query) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + + for (int i=0; irets; + for (auto& q: query) + { + int start = q[0], end = q[1], node = q[2]; + int dist = INT_MAX; + int ret; + + int cur = start; + while (1) + { + if (matrix[cur][node] < dist) + { + dist = matrix[cur][node]; + ret = cur; + } + if (cur==end) break; + + for (int j: next[cur]) + { + if (matrix[cur][end]==matrix[j][end]+1) + { + cur = j; + break; + } + } + } + rets.push_back(ret); + } + + return rets; + + } + + void dfs(int root, int cur, int dist) + { + for (int j: next[cur]) + { + if (j!=root && matrix[root][j]==0) + { + matrix[root][j] = dist+1; + dfs(root, j, dist+1); + } + } + } +}; diff --git a/Tree/2277.Closest-Node-to-Path-in-Tree/2277.Closest-Node-to-Path-in-Tree_v2.cpp b/Tree/2277.Closest-Node-to-Path-in-Tree/2277.Closest-Node-to-Path-in-Tree_v2.cpp new file mode 100644 index 000000000..30f174ab5 --- /dev/null +++ b/Tree/2277.Closest-Node-to-Path-in-Tree/2277.Closest-Node-to-Path-in-Tree_v2.cpp @@ -0,0 +1,90 @@ +using ll = long long; +const int MAXN = 100000; +const int LOGN = 17; +class Solution { +public: + vector> adj[MAXN]; + int up[MAXN][LOGN+1]; + int depth[MAXN]; + ll distRoot[MAXN]; + + void dfs(int cur, int parent) + { + up[cur][0] = parent; + for(auto &[v,w]: adj[cur]) + { + if(v == parent) continue; + depth[v] = depth[cur] + 1; + distRoot[v] = distRoot[cur] + w; + dfs(v, cur); + } + } + + int lca(int a, int b) + { + if(depth[a] < depth[b]) swap(a,b); + int diff = depth[a] - depth[b]; + for(int k = 0; k <= LOGN; k++){ + if(diff & (1<= 0; k--){ + if(up[a][k] != up[b][k]){ + a = up[a][k]; + b = up[b][k]; + } + } + return up[a][0]; + } + + ll dist(int a, int b) + { + int c = lca(a,b); + return distRoot[a] + distRoot[b] - 2*distRoot[c]; + } + + ll stepUp(int u, int k) { + for (int i=LOGN; i>=0; i--) { + if ((k>>i)&1) { + u = up[u][i]; + } + } + return u; + } + + vector closestNode(int n, vector>& edges, vector>& query) { + for (auto& edge: edges) + { + int u = edge[0], v = edge[1]; + adj[u].push_back({v,1}); + adj[v].push_back({u,1}); + } + + depth[0] = 0; + distRoot[0] = 0; + dfs(0, 0); + + vectorrets; + for(int k = 1; k <= LOGN; k++) { + for(int v = 0; v < n; v++) { + up[v][k] = up[up[v][k-1]][k-1]; + } + } + + for (auto&q: query) { + int u = q[0], v = q[1], k = q[2]; + vector>ans; + int uv = lca(u,v); + int uk = lca(u,k); + int vk = lca(v,k); + ans.push_back({depth[uv], uv}); + ans.push_back({depth[uk], uk}); + ans.push_back({depth[vk], vk}); + sort(ans.rbegin(), ans.rend()); + + rets.push_back(ans[0].second); + } + + return rets; + } +}; diff --git a/Tree/2277.Closest-Node-to-Path-in-Tree/Readme.md b/Tree/2277.Closest-Node-to-Path-in-Tree/Readme.md new file mode 100644 index 000000000..f6633c046 --- /dev/null +++ b/Tree/2277.Closest-Node-to-Path-in-Tree/Readme.md @@ -0,0 +1,13 @@ +### 2277.Closest-Node-to-Path-in-Tree + +#### 解法1:DFS + 
+本题的时间复杂度要求是o(N^2),所以常规解法是,从node开始dfs得到所有节点到node的距离dist2node。然后从start开始dfs整棵树,对于能够通往end的这个分支上的节点,取最小的dist2node。 + +本题还有一种比较精彩的解法。先遍历所有的点作为起点,dfs整棵树,这样得到全局的matrix[i][j]表示任意两点之间的距离。然后对于start,我们遍历它的邻居j,发现如果有```matrix[start][end]==matrix[j][end]+1```,说明j是位于从start到end的路径上。依次递归下去,就能直接从start走向end,沿途中取最小的matrix[j][node]. + +#### 解法2:LCA + Binary Lifting +一个非常好用的结论:在一棵树里,w点到u->v路径最近的点,其实就是以下三个点里深度最大的那一个:lca(u,v), lca(u,w), lca(v,w) + +我们用binary lifting模板就可以很容易地得到这三个点的位置,取depth最大的那个即可。 + diff --git a/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result.cpp b/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result.cpp new file mode 100644 index 000000000..45618a38c --- /dev/null +++ b/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result.cpp @@ -0,0 +1,86 @@ +/** + * Definition for a binary tree node. + * struct TreeNode { + * int val; + * TreeNode *left; + * TreeNode *right; + * TreeNode() : val(0), left(nullptr), right(nullptr) {} + * TreeNode(int x) : val(x), left(nullptr), right(nullptr) {} + * TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {} + * }; + */ +class Solution { + unordered_map>dp; +public: + int minimumFlips(TreeNode* root, bool result) + { + return dfs(root,result); + } + + int dfs(TreeNode* node, int expected) + { + if (!node->left && !node->right) + { + return node->val != expected; + } + + if (dp.find(node)!=dp.end() && dp[node].find(expected)!=dp[node].end()) + return dp[node][expected]; + + int ans = INT_MAX/2; + if (node->val == 2) + { + if (expected == 1) + { + ans = min(ans, dfs(node->left, 1)); + ans = min(ans, dfs(node->right, 1)); + } + else if (expected == 0) + { + ans = min(ans, dfs(node->left, 0) + dfs(node->right, 0)); + } + } + else if (node->val == 3) + { + if (expected == 1) + { + ans = min(ans, dfs(node->left, 1) + dfs(node->right, 1)); + } + else if (expected == 0) + { + ans = min(ans, dfs(node->left, 0)); + ans = min(ans, dfs(node->right, 0)); + } + } + else if (node->val == 4) + { + if (expected == 1) + { + ans = min(ans, dfs(node->left, 0) + dfs(node->right, 1)); + ans = min(ans, dfs(node->left, 1) + dfs(node->right, 0)); + } + else if (expected == 0) + { + ans = min(ans, dfs(node->left, 0) + dfs(node->right, 0)); + ans = min(ans, dfs(node->left, 1) + dfs(node->right, 1)); + } + } + else + { + TreeNode* child = node->left ? node->left : node->right; + if (expected == 1) + { + ans = min(ans, dfs(child, 0)); + } + else if (expected == 0) + { + ans = min(ans, dfs(child, 1)); + } + } + + dp[node][expected] = ans; + return ans; + } + + +}; diff --git a/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result/Readme.md b/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result/Readme.md new file mode 100644 index 000000000..bec4b1138 --- /dev/null +++ b/Tree/2313.Minimum-Flips-in-Binary-Tree-to-Get-Result/Readme.md @@ -0,0 +1,14 @@ +### 2313.Minimum-Flips-in-Binary-Tree-to-Get-Result + +本题只要想到用tree dp来做,一切就迎刃而解。我们定义```dfs(node, expected)```表示为使得子树eval(node)的结果等于expected,所需要所做的最小修改。然后我们根据node的运算符和expcted的数值,分情况讨论: +1. 如果node是OR,且expcted是true,那么只要返回```dfs(node->left, 1)```或者```dfs(node->right, 1)```中的较小值. +2. 如果node是OR,且expcted是false,那么需要递归```dfs(node->left, 0)+dfs(node->right, 0)```. +3. 如果node是AND,且expcted是true,那么需要递归```dfs(node->left, 1)+dfs(node->right, 1)```. +4. 如果node是AND,且expcted是false,那么只要返回```dfs(node->left, 0)```或者```dfs(node->right, 0)```中的较小值. +5. 
如果node是XOR,且expcted是true,那么只要返回```dfs(node->left, 0)+dfs(node->right, 1)```或者```dfs(node->left, 1)+dfs(node->right, 0)```中的较小值. +6. 如果node是XOR,且expcted是false,那么只要返回```dfs(node->left, 0)+dfs(node->right, 0)```或者```dfs(node->left, 1)+dfs(node->right, 1)```中的较小值. +7. 如果node是NOT,且expcted是true,那么需要递归```dfs(child, 0)```,其中child是node的唯一子树(左子树或者右子树) +8. 如果node是NOT,且expcted是false,那么需要递归```dfs(child, 1)``` +9. 如果node是叶子节点,那么只需要返回```node->val != expected``` + +记得所有的结果需要记忆化,加快访问。 diff --git a/Tree/2322.Minimum-Score-After-Removals-on-a-Tree/2322.Minimum-Score-After-Removals-on-a-Tree.cpp b/Tree/2322.Minimum-Score-After-Removals-on-a-Tree/2322.Minimum-Score-After-Removals-on-a-Tree.cpp new file mode 100644 index 000000000..1c36e33b4 --- /dev/null +++ b/Tree/2322.Minimum-Score-After-Removals-on-a-Tree/2322.Minimum-Score-After-Removals-on-a-Tree.cpp @@ -0,0 +1,87 @@ +class Solution { + unordered_set next[1005]; // next[i]: a set of adjacent nodes next to i + int visited[1005]; + vectornums; + int n; +public: + int minimumScore(vector& nums, vector>& edges) + { + this->n = nums.size(); + this->nums = nums; + for (auto edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].insert(b); + next[b].insert(a); + } + + int ret = INT_MAX; + for (auto edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].erase(b); + next[b].erase(a); + + int ret1 = solve(a, b); + int ret2 = solve(b, a); + + ret = min(ret, min(ret1, ret2)); + + next[a].insert(b); + next[b].insert(a); + } + + return ret; + } + + int solve(int a, int b) + { + fill(visited, visited+n, 0); + visited[a] = 1; + int A = getAll(a); + + fill(visited, visited+n, 0); + visited[b] = 1; + int B = getAll(b); + + int ret = INT_MAX; + fill(visited, visited+n, 0); + visited[a] = 1; + dfs(a, ret, A, B); + return ret; + } + + int dfs(int node, int& ret, int A, int B) + { + int total = 0; + for (int nxt: next[node]) + { + if (visited[nxt]==1) continue; + visited[nxt] = 1; + + int C = dfs(nxt, ret, A, B); + int other = A^C; + int mx = max(other, max(C, B)); + int mn = min(other, min(C, B)); + ret = min(ret, mx-mn); + + total^=C; + } + total ^= nums[node]; + return total; + } + + int getAll(int node) + { + int total = 0; + for (int nxt: next[node]) + { + if (visited[nxt]==1) continue; + visited[nxt] = 1; + total ^= getAll(nxt); + } + total ^= nums[node]; + return total; + } + +}; diff --git a/Tree/2322.Minimum-Score-After-Removals-on-a-Tree/Readme.md b/Tree/2322.Minimum-Score-After-Removals-on-a-Tree/Readme.md new file mode 100644 index 000000000..8537558b9 --- /dev/null +++ b/Tree/2322.Minimum-Score-After-Removals-on-a-Tree/Readme.md @@ -0,0 +1,11 @@ +### 2322.Minimum-Score-After-Removals-on-a-Tree + +题目中说要删除两条边,同时考虑的话太复杂吃不消。我们不妨先枚举其中一条边(a-b),将其砍断的话整张图就变成了两棵树,分别以a和b作为根。此时我们还需要再砍一条边,但这条边只能存在于其中一棵树里面。不妨假设是砍在了a树。那么对于b树,我们就无脑取其所有节点的XOR(记做B)即可。 + +接下来看a树,需要将其砍一刀变成两个部分。此时我们发现,任意一刀,都会将a树里砍下一棵子树(假设称为c)来。那么我们需要计算的是c子树的节点XOR(记做C),和a子树剩下部分的XOR。突破口来了,剩下部分的XOR,其实就是a子树整体的XOR(记做A),与C做XOR即可。最终这两刀砍成的三个部分,就分别是B,C,和A^C。 + +此时我们看到,只要在a树里面DFS遍历节点,那么很容易用递归的方法,用o(n)的时间计算每个节点其下方所有孩子的XOR,也就是将其作为c子树的根时所对应的C值。如果我们提前计算了A和B,那么A^C也就马上有了。于是在DFS整棵a树的节点过程中,我们等同于遍历了第二刀的位置,同时立马得到了第二刀砍出的三个部分。 + +所以本题的思想是,暴力枚举第一刀,然后遍历其中的一颗子树枚举第二刀的位置。总的时间复杂度是o(N^2). 
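+
+前面提到“剩余部分的XOR就是A^C”,这里用一个假设的小例子验证:设a树的节点值为{3,5,2},砍下的子树c包含{3,5}:
+```
+A   = 3^5^2 = 4      (a树整体的XOR)
+C   = 3^5   = 6      (子树c的XOR)
+A^C = 4^6   = 2      (恰好等于剩余部分{2}的XOR)
+```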
+ +当然,本题还有复杂度优化的空间,比如说用移根的方法来枚举第一刀,简化第二刀的遍历。这里不深究。 diff --git a/Tree/2445.Number-of-Nodes-With-Value-One/2445.Number-of-Nodes-With-Value-One.cpp b/Tree/2445.Number-of-Nodes-With-Value-One/2445.Number-of-Nodes-With-Value-One.cpp new file mode 100644 index 000000000..d531339be --- /dev/null +++ b/Tree/2445.Number-of-Nodes-With-Value-One/2445.Number-of-Nodes-With-Value-One.cpp @@ -0,0 +1,25 @@ +class Solution { + int ret = 0; + unordered_mapMap; +public: + int numberOfNodes(int n, vector& queries) + { + for (int q: queries) + Map[q]++; + dfs(1,0,n); + return ret; + } + + void dfs(int cur, int flips, int n) + { + if (cur > n) return; + + if (Map.find(cur)!=Map.end()) + flips += Map[cur]; + if (flips%2==1) + ret++; + + dfs(cur*2, flips, n); + dfs(cur*2+1, flips, n); + } +}; diff --git a/Tree/2445.Number-of-Nodes-With-Value-One/Readme.md b/Tree/2445.Number-of-Nodes-With-Value-One/Readme.md new file mode 100644 index 000000000..e4e236c1a --- /dev/null +++ b/Tree/2445.Number-of-Nodes-With-Value-One/Readme.md @@ -0,0 +1,3 @@ +### 2445.Number-of-Nodes-With-Value-One + +我们从根往下遍历每个节点,递归过程中累加所遇到的queries的数目,即意味着当前这个节点会被翻转多少次。 diff --git a/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries.cpp b/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries.cpp new file mode 100644 index 000000000..08ee36acc --- /dev/null +++ b/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries.cpp @@ -0,0 +1,48 @@ +/** + * Definition for a binary tree node. + * struct TreeNode { + * int val; + * TreeNode *left; + * TreeNode *right; + * TreeNode() : val(0), left(nullptr), right(nullptr) {} + * TreeNode(int x) : val(x), left(nullptr), right(nullptr) {} + * TreeNode(int x, TreeNode *left, TreeNode *right) : val(x), left(left), right(right) {} + * }; + */ +class Solution { + unordered_map>d2h; + int depth[100005]; + int height[100005]; +public: + vector treeQueries(TreeNode* root, vector& queries) + { + dfs_height(root, 0); + for (auto& [d, hs] : d2h) + sort(hs.rbegin(), hs.rend()); + + vectorrets; + for (int node: queries) + { + int d = depth[node]; + int h = height[node]; + if (d2h[d].size()==1) + rets.push_back(d - 1); + else if (d2h[d][0] == h) + rets.push_back(d2h[d][1] + d); + else + rets.push_back(d2h[d][0] + d); + } + return rets; + + } + + int dfs_height(TreeNode* node, int d) + { + if (node==NULL) return -1; + int h = max(dfs_height(node->left, d+1), dfs_height(node->right, d+1)) + 1; + d2h[d].push_back(h); + depth[node->val] = d; + height[node->val] = h; + return h; + } +}; diff --git a/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries/Readme.md b/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries/Readme.md new file mode 100644 index 000000000..b0c1525b2 --- /dev/null +++ b/Tree/2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries/Readme.md @@ -0,0 +1,7 @@ +### 2458.Height-of-Binary-Tree-After-Subtree-Removal-Queries + +我们定义一个节点的height表示从它到叶子节点的最大距离,depth表示从它到root的距离。 + +我们移除node节点对应的子树后,剩下的树的高度其实就取决于与它同depth的节点的height。所以我们将所有处在同depth的节点的height都提前收集好,那么就很容易找到其他子树的最大height。 + +特别注意,如果某个深度的节点只有一个,那么将其移除后,剩下树的最大高度就是该节点的depth-1. 
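+
+举一个假设的小例子说明查询时的取法:
+```
+某一深度 d=2 上收集到的height(降序): [5, 3, 2]
+查询节点的height为5: 答案 = d + 次大height = 2 + 3 = 5
+查询节点的height为3: 答案 = d + 最大height = 2 + 5 = 7
+该深度只有一个节点 : 答案 = d - 1 = 1
+```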
diff --git a/Tree/2467.Most-Profitable-Path-in-a-Tree/2467.Most-Profitable-Path-in-a-Tree.cpp b/Tree/2467.Most-Profitable-Path-in-a-Tree/2467.Most-Profitable-Path-in-a-Tree.cpp new file mode 100644 index 000000000..25e5f7de9 --- /dev/null +++ b/Tree/2467.Most-Profitable-Path-in-a-Tree/2467.Most-Profitable-Path-in-a-Tree.cpp @@ -0,0 +1,68 @@ +class Solution { + int b[100005]; + vectornext[100005]; + int ret = INT_MIN/2; + int bob; + vectoramount; +public: + int mostProfitablePath(vector>& edges, int bob, vector& amount) + { + this->bob = bob; + this->amount = amount; + + int n = amount.size(); + for (int i=0; i cycleLengthQueries(int n, vector>& queries) + { + vectorrets; + for (auto& q: queries) + { + int a = q[0], b = q[1]; + int count = 0; + while (a!=b) + { + if (a>b) + a = a/2; + else + b = b/2; + count++; + } + rets.push_back(count+1); + } + return rets; + + } +}; diff --git a/Tree/2509.Cycle-Length-Queries-in-a-Tree/Readme.md b/Tree/2509.Cycle-Length-Queries-in-a-Tree/Readme.md new file mode 100644 index 000000000..4ae71cbd3 --- /dev/null +++ b/Tree/2509.Cycle-Length-Queries-in-a-Tree/Readme.md @@ -0,0 +1,3 @@ +### 2509.Cycle-Length-Queries-in-a-Tree + +此题的本质就是找lowest common ancestor。本题的特殊之处在于每个节点指向其parent的路径其实非常简洁:val->val/2,数值是单调递减的。所以我们对于任意两个节点a和b,只要每次将其中的较小值减半,就一定最终让两者收敛一致,即LCA。减半操作的次数就是cycle的长度。 diff --git a/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum/2538.Difference-Between-Maximum-and-Minimum-Price-Sum.cpp b/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum/2538.Difference-Between-Maximum-and-Minimum-Price-Sum.cpp new file mode 100644 index 000000000..8a22d6b13 --- /dev/null +++ b/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum/2538.Difference-Between-Maximum-and-Minimum-Price-Sum.cpp @@ -0,0 +1,78 @@ +using LL = long long; +class Solution { + vector next[100005]; + LL sum1[100005]; // sum1[node]: the maximum path from node to its non-leaf child + LL sum2[100005]; // sum2[node]: the maximum path from node to its leaf child + LL ret = 0; +public: + long long maxOutput(int n, vector>& edges, vector& price) + { + if (n==1) return 0; + + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + + dfs(0, -1, price); + + dfs2(0, -1, price); + + return ret; + } + + void dfs(int cur, int parent, vector& price) + { + if (next[cur].size()==1 && next[cur][0] == parent) + { + sum1[cur] = 0; + sum2[cur] = price[cur]; + return; + } + + LL maxSum1 = 0, maxSum2 = 0; + for (int nxt: next[cur]) + { + if (nxt == parent) continue; + dfs(nxt, cur, price); + maxSum1 = max(maxSum1, sum1[nxt]); + maxSum2 = max(maxSum2, sum2[nxt]); + } + sum1[cur] = maxSum1 + price[cur]; + sum2[cur] = maxSum2 + price[cur]; + } + + void dfs2(int cur, int parent, vector& price) + { + vector>arr1; // {SumVal, childNodeId} + vector>arr2; + + LL ans = sum1[cur]; + if (cur!=0) ans = max(ans, sum2[cur]); + + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + arr1.push_back({sum1[nxt], nxt}); + arr2.push_back({sum2[nxt], nxt}); + dfs2(nxt, cur, price); + } + sort(arr1.rbegin(), arr1.rend()); + sort(arr2.rbegin(), arr2.rend()); + + if (arr1.size() >= 2) + { + if (arr1[0].second!=arr2[0].second) + ans = max(ans, arr1[0].first + arr2[0].first + price[cur]); + else + ans = max(ans, max(arr1[0].first + arr2[1].first, arr1[1].first + arr2[0].first) + price[cur]); + } + + ret = max(ret, ans); + } +}; + + +// Find a maximum path, for which one end is leaf, and the other is not. 
diff --git a/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum/Readme.md b/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum/Readme.md new file mode 100644 index 000000000..f0cefd8b5 --- /dev/null +++ b/Tree/2538.Difference-Between-Maximum-and-Minimum-Price-Sum/Readme.md @@ -0,0 +1,40 @@ +### 2538.Difference-Between-Maximum-and-Minimum-Price-Sum + +因为选定了root之后的Min Price必然对应的就是root节点本身,所以所谓的“Difference-Between-Maximum-and-Minimum-Price-Sum”就是从root的邻接节点开始找一条最大路径。既然是求最大路径,必然我们会将另一个端点取到某个叶子节点。综上,本题的本质是在树里找一条最大路径,一端是叶子节点,另一端是非叶子节点(这样它必然有一个邻接节点可以作为root)。 + +在树里找最大路径,最常见的方法就是遍历“拐点”。我们以任意一个节点为根来观察图,树里的任何一条路径都有一个“拐点”。我们就是需要给这个拐点找两个“下垂”的分支路径,一个延伸到叶子节点,另一个不能包括叶子节点。当然,还有第三种情况,就是“拐点”本身就是一个端点,那样的话我们只需要找一条“下垂”的分支路径,该路径不能延伸到叶子节点。 + +我们可以提前用递归预处理整棵树。对于每个节点,我们可以计算出该节点向下到非叶子节点的最大路径(记做sum1),以及该节点向下到叶子节点的最大路径(记做sum2)。显然,对于sum1而言,只需要在递归的时候不加入叶子节点即可。代码如下: +```cpp + void dfs(int cur, int prev, vector& price) + { + if (当前节点cur是叶子节点) + { + sum1[cur] = 0; + sum2[cur] = price[cur]; + return; + } + + LL maxSum1 = 0, maxSum2 = 0; + for (int nxt: next[cur]) + { + if (nxt==prev) continue; + dfs(nxt, cur, price); + maxSum1 = max(maxSum1, sum1[nxt]); + maxSum2 = max(maxSum2, sum2[nxt]); + } + + sum1[cur] = maxSum1 + price[cur]; + sum2[cur] = maxSum2 + price[cur]; + } +``` + +然后我们第二遍递归整棵树,对于每个节点cur,有两种构造路径的情况: +1. 该节点就是路径的端点,于是路径的值就是`sum1[cur]` +2. 该节点是拐点,且至少有两条向下的路径,那么我们在cur的所有孩子里找最大的sum1和最大的sum2,这两段拼接起来即可。但是有可能这两段向下的路径对应的是同一条支路。这样的话,我们需要选取“最大的sum1和次大的sum2”,以及“次大的sum1和最大的sum2”。 + +在以上三个答案中,我们挑选最大的一个,作为以cur为拐点的最大路径。最终遍历所有的节点,取全局最大值。 + +特别注意,当n=1时,全图只有一个节点,根和叶子节点重合,`if (当前节点cur是叶子节点)`的代码需要格外小心。 + + diff --git a/Tree/2581.Count-Number-of-Possible-Root-Nodes/2581.Count-Number-of-Possible-Root-Nodes.cpp b/Tree/2581.Count-Number-of-Possible-Root-Nodes/2581.Count-Number-of-Possible-Root-Nodes.cpp new file mode 100644 index 000000000..a35eb63e7 --- /dev/null +++ b/Tree/2581.Count-Number-of-Possible-Root-Nodes/2581.Count-Number-of-Possible-Root-Nodes.cpp @@ -0,0 +1,52 @@ +class Solution { + vector next[100005]; + unordered_set guess[100005]; + int k; + int ret = 0; +public: + int rootCount(vector>& edges, vector>& guesses, int k) { + this->k = k; + int n = edges.size()+1; + for (auto& e: edges) + { + next[e[0]].push_back(e[1]); + next[e[1]].push_back(e[0]); + } + for (auto& g: guesses) + guess[g[0]].insert(g[1]); + + int count = dfs(0, -1); + + dfs2(0, -1, count); + + return ret; + } + + int dfs(int cur, int parent) + { + int count = 0; + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + count += dfs(nxt, cur); + if (guess[cur].find(nxt)!=guess[cur].end()) + count +=1; + } + return count; + } + + void dfs2(int cur, int parent, int count) + { + if (count >= k) ret++; + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + int temp = count; + if (guess[cur].find(nxt)!=guess[cur].end()) + temp -= 1; + if (guess[nxt].find(cur)!=guess[nxt].end()) + temp += 1; + dfs2(nxt, cur, temp); + } + } +}; diff --git a/Tree/2581.Count-Number-of-Possible-Root-Nodes/Readme.md b/Tree/2581.Count-Number-of-Possible-Root-Nodes/Readme.md new file mode 100644 index 000000000..b6495713f --- /dev/null +++ b/Tree/2581.Count-Number-of-Possible-Root-Nodes/Readme.md @@ -0,0 +1,9 @@ +### 2581.Count-Number-of-Possible-Root-Nodes + +既然题目问的是“有多少节点作为根符合要求”,那么我们自然就会思考遍历每个节点作为根的情况。因为节点总数是1e5,所以我们只能用线性的时间遍历完整棵树,并且对于每个根的情况下,用o(1)的时候做出判断。 + +对于这种题目,有一种常见的套路就是“移根”。假设当前节点A作为根时,答案为a,那么以A的某个邻接节点B未做根时,答案能否快速从a转化而来呢? 
+ +假设当前节点A作为根时,它对应的guesses里面有x个顺序的猜想(猜对了),y个逆序的猜想(猜错了)。那么我们转而考虑以B为根时,顺逆序唯一改变的边其实就只有AB之间的路径。所以如果AB边原本是一个顺序的猜想,那么此刻就会变成逆序;如果AB边原本是一个逆序的猜想,那么此刻就会变成顺序。 + +所以本题的做法就是,先以任意节点(比如说0)为根,一遍dfs计算有多少正确的guess,假设叫做count。然后递归处理它相邻的节点作为根的情况,只需要考察这条相邻边的正逆序变化改变了多少猜想,更新count即可。 diff --git a/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable.cpp b/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable.cpp new file mode 100644 index 000000000..04a9aeeb6 --- /dev/null +++ b/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable.cpp @@ -0,0 +1,53 @@ +class Solution { + vector> next[100005]; + vectorrets; +public: + vector minEdgeReversals(int n, vector>& edges) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back({b, 1}); + next[b].push_back({a, -1}); + } + + int count = dfs1(0, -1); + + rets.resize(n); + + dfs2(0, -1, count); + + return rets; + } + + int dfs1(int cur, int parent) + { + int ret = 0; + for (auto& [nxt, dir]: next[cur]) + { + if (nxt==parent) continue; + if (dir==1) + ret += dfs1(nxt, cur); + else + { + ret += dfs1(nxt, cur) + 1; + } + } + return ret; + } + + void dfs2(int cur, int parent, int count) + { + rets[cur] = count; + for (auto& [nxt, dir]: next[cur]) + { + if (nxt==parent) continue; + if (dir == 1) + dfs2(nxt, cur, count+1); + else + dfs2(nxt, cur, count-1); + } + } + + +}; diff --git a/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable/Readme.md b/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable/Readme.md new file mode 100644 index 000000000..ac266b68d --- /dev/null +++ b/Tree/2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable/Readme.md @@ -0,0 +1,7 @@ +### 2858.Minimum-Edge-Reversals-So-Every-Node-Is-Reachable + +典型的移根技巧。 + +先用一遍DFS,以node 0为根遍历全树,计算node的reversal edge的数目count. 
+ +然后第二遍DFS,从node 0开始。当dfs从节点i转移至邻接的节点j时,以节点i为根的树的reversal edge count,与节点j为根的树的reversal edge count,其实只相差了"i->j"这条边而已。如果这条边对于i而言是顺边,那么对于j而言就是逆边。反之亦然。所以他们之间的结果只是相差+1/-1而已。 diff --git a/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes.cpp b/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes.cpp new file mode 100644 index 000000000..9238a78ac --- /dev/null +++ b/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes.cpp @@ -0,0 +1,53 @@ +class Solution { + int memo[100005][14]; + vectornext[100005]; +public: + int maximumPoints(vector>& edges, vector& coins, int k) + { + int n = edges.size()+1; + for (int i=0; i& coins, int k) + { + if (reduced >= 13) reduced = 13; + + if (memo[cur][reduced]!=INT_MIN/2) + return memo[cur][reduced]; + + int sum1 = helper(coins[cur], reduced) - k; + for (int nxt: next[cur]) + { + if (nxt == parent) continue; + sum1 += dfs(nxt, cur, reduced, coins, k); + } + + int sum2 = helper(coins[cur], reduced)/2; + for (int nxt: next[cur]) + { + if (nxt == parent) continue; + sum2 += dfs(nxt, cur, reduced+1, coins, k); + } + + memo[cur][reduced] = max(sum1, sum2); + return memo[cur][reduced]; + } +}; diff --git a/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes/Readme.md b/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes/Readme.md new file mode 100644 index 000000000..4bc7d7ee3 --- /dev/null +++ b/Tree/2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes/Readme.md @@ -0,0 +1,5 @@ +### 2920.Maximum-Points-After-Collecting-Coins-From-All-Nodes + +常规的DFS。对于每个节点,我们需要知道它的祖先节点们总共做了几次“减半”操作,才能确定自身能够得到多少coins。所以DFS的参数里必须包含这个量。 + +对于DFS而言,我们总是与记忆化结合,可以优化时间复杂度。根据参数,记忆化数组需要的维度是`T*N`,其中T是对每个节点而言可能做多的“减半”操作次数。注意到任意节点的初始coins是1e4,这就意味着祖先最多累积13次减半操作,就可以让自身的coins变为零而不再变化。所以DFS的空间复杂度是可以接受的。 diff --git a/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree.cpp b/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree.cpp new file mode 100644 index 000000000..39a7c0038 --- /dev/null +++ b/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree.cpp @@ -0,0 +1,51 @@ +using LL = long long; +class Solution { + vectornext[20005]; + LL subtree[20005]; + vectorvalues; +public: + long long maximumScoreAfterOperations(vector>& edges, vector& values) + { + this->values = values; + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + + dfs0(0, -1); + return dfs(0, -1); + + } + + LL dfs0(int cur, int parent) + { + LL sum = values[cur]; + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + sum += dfs0(nxt, cur); + } + subtree[cur] = sum; + return sum; + } + + + LL dfs(int cur, int parent) + { + if (next[cur].size()==1 && cur!=0) + { + return 0; + } + + LL sum = values[cur]; + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + sum += dfs(nxt, cur); + } + + return max(sum, subtree[cur]-values[cur]); + } +}; diff --git a/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree/Readme.md b/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree/Readme.md new file mode 100644 index 000000000..d3768cfd2 --- /dev/null +++ b/Tree/2925.Maximum-Score-After-Applying-Operations-on-a-Tree/Readme.md @@ -0,0 +1,9 @@ +### 
2925.Maximum-Score-After-Applying-Operations-on-a-Tree + +我们令dfs(cur)表示以cur为根的子树保持healthy时,能够取得的最高分。 + +我们容易发现,从root一路往下时,只要在某个节点node采取了“不取”的策略,那么之后就没有继续往下走的必要了。因为从root到node再到它的任何一个leaf,这个path sum肯定不会是零。所以我们必然会贪心地将node以下所有节点的value都取走。 + +所以我们在dfs的过程中,如果遍历到了某个节点,其隐含的意思就是从root到node之间的路径都“扫荡”光了。此时如果node依然采用了“取”的策略,那么我们必须保证node的所有子树path都是healthy的才行。于是就是递归处理dfs(nxt)即可。 + +边界条件是对于leaf node,它必须不取,否则连它也取的话,则意味着从root到leaf的path每个节点都取光了,必然不是healthy。 diff --git a/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes.cpp b/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes.cpp new file mode 100644 index 000000000..333479b16 --- /dev/null +++ b/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes.cpp @@ -0,0 +1,46 @@ +using LL = long long; +class Solution { + vectornext[20005]; + vectorchildren[20005]; + vectorrets; +public: + vector placedCoins(vector>& edges, vector& cost) + { + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + + rets.resize(cost.size()); + dfs(0, -1, cost); + return rets; + } + + void dfs(int cur, int parent, vector&cost) + { + vectortemp; + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + dfs(nxt, cur, cost); + for (int x:children[nxt]) + temp.push_back(x); + } + temp.push_back(cost[cur]); + + sort(temp.begin(), temp.end()); + int n = temp.size(); + if (n < 3) + rets[cur] = 1; + else + rets[cur] = max(0LL, max(temp[n-3]*temp[n-2]*temp[n-1], temp[0]*temp[1]*temp[n-1])); + + if (n>=1) children[cur].push_back(temp[0]); + if (n>=2) children[cur].push_back(temp[1]); + if (n>=5) children[cur].push_back(temp[n-3]); + if (n>=4) children[cur].push_back(temp[n-2]); + if (n>=3) children[cur].push_back(temp[n-1]); + } +}; diff --git a/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes/Readme.md b/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes/Readme.md new file mode 100644 index 000000000..1b67960b6 --- /dev/null +++ b/Tree/2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes/Readme.md @@ -0,0 +1,14 @@ +### 2973.Find-Number-of-Coins-to-Place-in-Tree-Nodes + +本题只需要常规的DFS,对于每个节点,假设它的子树的所有的cost都收集在了temp里并保持有序,那么根据题意,收益其实就是这两者的最大值: +``` +max(temp[n-1]*temp[n-2]*temp[n-3], temp[0]*temp[1]*temp[n-1]); +``` +这是因为,三元素乘积的最大值,要么是三个最大正数的乘积,要么是两个最小负数和一个最大正数的乘积。对于前者,我们只需要盲目地取最大的三个数即可(如果不存在三个正数,那么它自然也不会是最优解);对于后者,我们也只需要盲目地取两个最小值和一个最大值即可(如果不存在两个负数,那么它自然也不会是最优解)。 + +综上,我们最多只用到了temp里的五个元素即可。并且在向上传递时,也只需要最多返回五个元素即可。分别是: +1. 最小值temp[0],当n>=1 +2. 次小值temp[1],当n>=2 +3. 第三大值temp[n-3],当n>=5时才能保证该元素不与2重复。 +4. 第二大值temp[n-2],当n>=4时才能保证该元素不与2重复。 +5. 
第一大值temp[n-1],当n>=3时才能保证该元素不与2重复。 diff --git a/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees/3203.Find-Minimum-Diameter-After-Merging-Two-Trees.cpp b/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees/3203.Find-Minimum-Diameter-After-Merging-Two-Trees.cpp new file mode 100644 index 000000000..9039d329e --- /dev/null +++ b/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees/3203.Find-Minimum-Diameter-After-Merging-Two-Trees.cpp @@ -0,0 +1,63 @@ +class Solution { +public: + int minimumDiameterAfterMerge(vector>& edges1, vector>& edges2) + { + int a = treeDiameter(edges1); + int b = treeDiameter(edges2); + return max({(a+1)/2+(b+1)/2+1, a, b}); + } + + int treeDiameter(vector>& edges) + { + int n = edges.size()+1; + vector>next(n); + for (auto edge: edges) + { + next[edge[0]].push_back(edge[1]); + next[edge[1]].push_back(edge[0]); + } + auto t1 = bfs(next, 0); + auto t2 = bfs(next, t1.first); + return t2.second; + } + + pair bfs(vector>&next, int u) + { + int n = next.size(); + vectordis(n, -1); + queue q; + q.push(u); + + dis[u] = 0; + + while (!q.empty()) + { + int t = q.front(); + q.pop(); + + for (auto it = next[t].begin(); it != next[t].end(); it++) + { + int v = *it; + if (dis[v] == -1) + { + q.push(v); + dis[v] = dis[t] + 1; + } + } + } + + int maxDis = 0; + int nodeIdx = 0; + + for (int i = 0; i < n; i++) + { + if (dis[i] > maxDis) + { + maxDis = dis[i]; + nodeIdx = i; + } + } + return make_pair(nodeIdx, maxDis); + } + +}; diff --git a/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees/Readme.md b/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees/Readme.md new file mode 100644 index 000000000..07ab373b9 --- /dev/null +++ b/Tree/3203.Find-Minimum-Diameter-After-Merging-Two-Trees/Readme.md @@ -0,0 +1,9 @@ +### 3203.Find-Minimum-Diameter-After-Merging-Two-Trees + +关于树的最大路径(直径),我们已经有`1245.Tree-Diameter`的做法。 + +在本题中,我们要求联通后的树的直径最小,那么联通的两个点,必定在各自树的直径的中点位置。这可以用简单的反证法推理:假设联通点是树的节点A,那么根据直径定义,我们需要寻找A到树里离它的最远端点。我们可以至少找到这样一条路径:A先到直径的中点M,再从M走到直径的一个端点(固定为d/2长度)。这条路径显然总长于将A点直接设置于M处的方案。故联通点设置在M处,可以最小化A离它最短端点。 + +所以本题的一个解就是 `ceil(d1/2) + ceil(d2/2) + 1`。 + +但是注意,联通树的最大直径不一定要一定同时经过树1和树2。比如,如果树1远远大于树2,那么d1本身就可能是联通树的最大直径。类似的d2也是。所以本题是要在三个可能答案中取最大的那个。 diff --git a/Tree/662.Maximum-Width-of-Binary-Tree/662.Maximum-Width-of-Binary-Tree.cpp b/Tree/662.Maximum-Width-of-Binary-Tree/662.Maximum-Width-of-Binary-Tree.cpp index 63d3f829d..34cb9d379 100644 --- a/Tree/662.Maximum-Width-of-Binary-Tree/662.Maximum-Width-of-Binary-Tree.cpp +++ b/Tree/662.Maximum-Width-of-Binary-Tree/662.Maximum-Width-of-Binary-Tree.cpp @@ -12,33 +12,42 @@ class Solution { int widthOfBinaryTree(TreeNode* root) { root->val = 0; - dequeq; + vectorq; q.push_back(root); int ans = 1; while (!q.empty()) { int len = q.size(); - ans = max(ans, q.back()->val - q.front()->val + 1); + ans = max(ans, q.back()->val - q[0]->val + 1); - int base = q.front()->val; + vectorvals; + vectorp; - while (len--) - { - TreeNode* node = q.front(); - q.pop_front(); - + for (int i=0; ileft) { - node->left->val = (node->val-base)*2+1; - q.push_back(node->left); + vals.push_back((long long)node->val * 2 + 1); + p.push_back(node->left); } if (node->right) { - node->right->val = (node->val-base)*2+2; - q.push_back(node->right); + vals.push_back((long long)node->val * 2 + 2); + p.push_back(node->right); } } + + if (!p.empty()) + { + for (int i=0; ival = (int)(vals[i] - vals[0]); + } + } + + q = p; } return ans; } diff --git a/Tree/834.Sum-of-Distances-in-Tree/834.Sum-of-Distances-in-Tree.cpp 
b/Tree/834.Sum-of-Distances-in-Tree/834.Sum-of-Distances-in-Tree.cpp index 28153a953..61d19f686 100644 --- a/Tree/834.Sum-of-Distances-in-Tree/834.Sum-of-Distances-in-Tree.cpp +++ b/Tree/834.Sum-of-Distances-in-Tree/834.Sum-of-Distances-in-Tree.cpp @@ -1,75 +1,68 @@ class Solution { - unordered_map>Children; - vectorSubLeaves; - vectorresults; - + int visited[30005]; + int count[30005]; + vector next[30005]; + vector rets; + int n; public: - vector sumOfDistancesInTree(int N, vector>& edges) + vector sumOfDistancesInTree(int n, vector>& edges) { - SubLeaves.assign(N,0); - results.assign(N,0); - - unordered_map>Map; - for (int i=0; in = n; + rets.resize(n); + for (auto& edge: edges) { - Map[edges[i][0]].insert(edges[i][1]); - Map[edges[i][1]].insert(edges[i][0]); + next[edge[0]].push_back(edge[1]); + next[edge[1]].push_back(edge[0]); } - - queueq; - q.push(0); - while (!q.empty()) - { - int root = q.front(); - q.pop(); - for (auto child:Map[root]) - { - Children[root].insert(child); - Map[child].erase(root); - q.push(child); - } - } - - int root = 0; - int temp = DFS1(root); - int AllSum = DFS2(root); - - results[0] = AllSum; - DFS3(root); - - return results; + + visited[0] = 1; + dfs(0); // Compute the substree sizes + + for (int i=1; ichild的连接关系(注意,因为是树,反向的关系我们不记录在Hash表里)。 +本题中我们可以任意选取一个节点定义为root(比如说0号节点),然后可以用DFS遍历整棵树,可以得到两个信息:每个节点所对应的子树的节点个数(记做count[i]),所有节点到根(0号节点)的距离之和。 -然后我们可以做什么呢?比较容易用递归办到的,就是root到所有子节点的距离之和,标记为f(root)。那么接下来,如何得到一个子节点child到其他所有节点的距离之和呢?难道要以该节点为根重新展开一张树吗?其实我们可以考虑f(parent)和f(child)之间的关系。 +那么接下来,如何得到一个非根节点到其他所有节点的距离之和呢?难道要以该节点为根重新展开一张树吗?这里介绍一种非常常见的“移根”的思想。 -假设已知f(parent),如果我们把起点从parent迁到child的话,那么到所有除child子树之外的节点,距离都增加了1;到所有child子树的节点,距离都减少了1. +我们令f(node)该节点到所有节点的距离之和。假设已知f(parent),我们可以考虑f(parent)和它的一个f(child)之间的关系。我们把根从parent迁到child之后,所有属于child子树的节点离根的距离都减少了1。剩余的其他所有节点,其到根的距离都增加了1. 所以有如下的关系 ``` -f(child) = f(parent)+(除child子树之外所有节点的数目)-(child子树的节点数目) +f(child) = f(parent) - (child子树的节点数目) + (N - child子树的节点数目) ``` 可见,所有的f都可以自上而下通过递归得到。 +类似的思想有一个更常见的题目:求一个数组里每个节点到其他节点的距离之和。如1685,2121. -[Leetcode Link](https://leetcode.com/problems/sum-of-distances-in-tree) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/sum-of-distances-in-tree) diff --git a/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree.cpp b/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree_v1.cpp similarity index 100% rename from Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree.cpp rename to Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree_v1.cpp diff --git a/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree_v2.cpp b/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree_v2.cpp new file mode 100644 index 000000000..55e7743f9 --- /dev/null +++ b/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/863.All-Nodes-Distance-K-in-Binary-Tree_v2.cpp @@ -0,0 +1,61 @@ +/** + * Definition for a binary tree node. 
+ * struct TreeNode { + * int val; + * TreeNode *left; + * TreeNode *right; + * TreeNode(int x) : val(x), left(NULL), right(NULL) {} + * }; + */ +class Solution { + unordered_map>adj; +public: + vector distanceK(TreeNode* root, TreeNode* target, int K) + { + if (K==0) return {target->val}; + + DFS(root); + + queue>q; + unordered_setvisited; + + q.push({target->val,0}); + visited.insert(target->val); + + vectorresults; + while (!q.empty()) + { + auto [cur, step] = q.front(); + q.pop(); + if (step>K) break; + + for (int next: adj[cur]) + { + if (visited.find(next)!=visited.end()) + continue; + q.push({next,step+1}); + visited.insert(next); + + if (step+1==K) results.push_back(next); + } + } + return results; + } + + void DFS(TreeNode* node) + { + if (node==NULL) return; + if (node->left!=NULL) + { + adj[node->val].push_back(node->left->val); + adj[node->left->val].push_back(node->val); + DFS(node->left); + } + if (node->right!=NULL) + { + adj[node->val].push_back(node->right->val); + adj[node->right->val].push_back(node->val); + DFS(node->right); + } + } +}; diff --git a/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/Readme.md b/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/Readme.md index f083b3eb5..c32ae972b 100644 --- a/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/Readme.md +++ b/Tree/863.All-Nodes-Distance-K-in-Binary-Tree/Readme.md @@ -1,5 +1,6 @@ ### 863.All-Nodes-Distance-K-in-Binary-Tree +#### 解法1: 本题的关键点是,任何两个节点AB之间的路径,都可以想象成有一个“拐点”O,其中OA是沿左子树向下的路径,OB是沿右子树向下的路径。我们可以递归处理每一个节点node,设想它是这个拐点,A是target并位于其中一个分支,那么如何在另一个分支中找到B?显然,假设我们能得到target到node->left之间的距离是t,那么我们只需要从node->right出发往下走k-2-t步,所抵达的节点就都是符合要求的B点。同理,如果target位于node->right分支,类似的处理。 需要单独处理的情况就是```node==target```,此时我们找的就是从node开始往下走k步到达的节点。 @@ -8,5 +9,8 @@ 本题和```543.Diameter-of-Binary-Tree```的套路是一样的。也就是说,对于树里面任何两点之间的距离,优先去想它的拐点。 +#### 解法2: +将树的形式转化为图的形式。这样就可以target为起点进行BFS,寻找距离为k的点。 -[Leetcode Link](https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree) \ No newline at end of file + +[Leetcode Link](https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree) diff --git a/Trie/1268.Search-Suggestions-System/1268.Search-Suggestions-System_v1.cpp b/Trie/1268.Search-Suggestions-System/1268.Search-Suggestions-System_v1.cpp index d68782856..ec9dcebb9 100644 --- a/Trie/1268.Search-Suggestions-System/1268.Search-Suggestions-System_v1.cpp +++ b/Trie/1268.Search-Suggestions-System/1268.Search-Suggestions-System_v1.cpp @@ -1,26 +1,33 @@ -class Solution { - class TrieNode +class TrieNode +{ + public: + TrieNode* next[26]; + bool isEnd; + TrieNode() { - public: - TrieNode* next[26]; - int isEnd; - TrieNode() - { - for (int i=0; i<26; i++) - next[i]=NULL; - isEnd=0; - } - }; - TrieNode* root; - + for (int i=0; i<26; i++) + next[i]=NULL; + isEnd=0; + } +}; + +class Solution { + TrieNode* root; public: vector> suggestedProducts(vector& products, string searchWord) { root = new TrieNode(); - for (auto word: products) - insert(word); - - cout<next[ch-'a']==NULL) + node->next[ch-'a'] = new TrieNode(); + node = node->next[ch-'a']; + } + node->isEnd = true; + } vector>rets; TrieNode* node=root; @@ -38,50 +45,30 @@ class Solution { node = node->next[ch-'a']; word.push_back(ch); vectorans; - string temp = ""; - DFS(node,ans,temp); + string str = ""; + DFS(node,ans,str); - while (ans.size()>3) - ans.pop_back(); for (int j=0; jnext[ch-'a']==NULL) - node->next[ch-'a']=new TrieNode(); - node=node->next[ch-'a']; - } - node->isEnd+=1; - } + } - void DFS(TrieNode* node, vector&ans, string temp) - { - if (node->isEnd>0) - { - for (int k=0; kisEnd; 
k++) - ans.push_back(temp); - } - + void DFS(TrieNode* node, vector&ans, string& str) + { + if (ans.size()==3) return; + + if (node->isEnd == true) + ans.push_back(str); for (int i=0; i<26; i++) { - if (ans.size()>3) break; if (node->next[i]==NULL) continue; - temp.push_back('a'+i); - DFS(node->next[i],ans, temp); - temp.pop_back(); + str.push_back('a'+i); + DFS(node->next[i],ans, str); + str.pop_back(); } } - - }; diff --git a/Trie/1268.Search-Suggestions-System/Readme.md b/Trie/1268.Search-Suggestions-System/Readme.md index 46a03c266..540049610 100644 --- a/Trie/1268.Search-Suggestions-System/Readme.md +++ b/Trie/1268.Search-Suggestions-System/Readme.md @@ -1,14 +1,12 @@ ### 1268.Search-Suggestions-System #### 解法1: -比较严谨的做法是用Trie。根据searchWord的前缀(比如说前k个字母)在Trie中推进直至某个节点,然后以这个节点为根,用DFS的方法、根据先左后右的规则,找出最多三个合法的子路径(即能够拼成一个product)。 - -特别注意,此题中的product允许有重复,所以在TrieNode定义中,传统的bool isEnd需要改造为int count,并在构造整颗树的时候用它来计算有多少个重复(即共享完全相同的路径)的product。在DFS的过程中,如果推进到某个节点的count>1,说明会有多于1个product有着相同的名字,我们需要把它们都算上。 +比较严谨的做法是用Trie。先将所有的products的单词建树。然后,根据searchWord的前缀(比如说前k个字母)在Trie中推进直至某个节点node,然后以node为根进行DFS,根据先序遍历的规则(优先走左下方的子树),找出字典序最小的三个到叶子节点的子路径。 #### 解法2: 有一种非常取巧的方法,那就是将所有product按照字典排序。然后查找searchWord在里面的位置(用lower_bound定位),得到的就是字典序恰好大于等于searchWord的那个单词。我们查看以这个单词开始的连续三个单词,是否与searchWord共享指定书目的前缀,是的话就相应收入囊中。 -这种方法可以不必理会products中是否存在重复。但是第一步排序的过程其实比较耗时,不过题目给出了```1 <= Σ products[i].length <= 2 * 10^4```,这就是暗示了字符串排序的复杂度是可以接受的。 +这种方法第一步排序的过程其实比较耗时,不过题目给出了```1 <= Σ products[i].length <= 2 * 10^4```,这就是暗示了字符串排序的复杂度是可以接受的。 -[Leetcode Link](https://leetcode.com/problems/search-suggestions-system) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/search-suggestions-system) diff --git a/Trie/208.Implement-Trie--Prefix-Tree/Readme.md b/Trie/208.Implement-Trie--Prefix-Tree/Readme.md index b00c62ee4..d7aed2b8b 100644 --- a/Trie/208.Implement-Trie--Prefix-Tree/Readme.md +++ b/Trie/208.Implement-Trie--Prefix-Tree/Readme.md @@ -7,4 +7,4 @@ 4. 
在Trie树中找指定的前缀(不需要找到叶子节点) -[Leetcode Link](https://leetcode.com/problems/implement-trie--prefix-tree) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/implement-trie-prefix-tree) \ No newline at end of file diff --git a/Trie/2416.Sum-of-Prefix-Scores-of-Strings/2416.Sum-of-Prefix-Scores-of-Strings.cpp b/Trie/2416.Sum-of-Prefix-Scores-of-Strings/2416.Sum-of-Prefix-Scores-of-Strings.cpp new file mode 100644 index 000000000..2dfee6d00 --- /dev/null +++ b/Trie/2416.Sum-of-Prefix-Scores-of-Strings/2416.Sum-of-Prefix-Scores-of-Strings.cpp @@ -0,0 +1,47 @@ +class Solution { + class TrieNode + { + public: + TrieNode* next[26]; + int count; + TrieNode() + { + for (int i=0; i<26; i++) + next[i] = NULL; + count = 0; + } + }; +public: + vector sumPrefixScores(vector& words) { + TrieNode* root = new TrieNode(); + + for (auto& word: words) + { + TrieNode* node = root; + for (char ch: word) + { + if (node->next[ch-'a']==NULL) + node->next[ch-'a'] = new TrieNode(); + node = node->next[ch-'a']; + node->count += 1; + } + } + + vectorrets; + for (auto& word: words) + { + TrieNode* node = root; + int score = 0; + for (char ch: word) + { + if (node->next[ch-'a'] == NULL) + break; + node = node->next[ch-'a']; + score += node->count; + } + rets.push_back(score); + } + + return rets; + } +}; diff --git a/Trie/2416.Sum-of-Prefix-Scores-of-Strings/Readme.md b/Trie/2416.Sum-of-Prefix-Scores-of-Strings/Readme.md new file mode 100644 index 000000000..1b6fb0621 --- /dev/null +++ b/Trie/2416.Sum-of-Prefix-Scores-of-Strings/Readme.md @@ -0,0 +1,3 @@ +### 2416.Sum-of-Prefix-Scores-of-Strings + +常规的字典树的应用。在建树的时候,我们给每个节点node标记一个count,表示从root->node的这条路径是多少个word的前缀。在查询的时候,我们在字典树里遍历word的前缀,所到之处的count就是当前前缀的score了。 diff --git a/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees.cpp b/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees.cpp new file mode 100644 index 000000000..7147bfafe --- /dev/null +++ b/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees.cpp @@ -0,0 +1,97 @@ +using LL = long long; + +class Solution { + vectornext[50005]; + LL val[50005]; + vectorvalues; + + class TrieNode + { + public: + TrieNode* next[2]; + TrieNode(){ + for (int i=0; i<2; i++) + next[i] = NULL; + } + }; + TrieNode* root; + + LL ret = 0; + +public: + void insert(LL num) + { + TrieNode* node = root; + for (int i=63; i>=0; i--) + { + int d = ((num>>i)&1); + if (node->next[d]==NULL) + node->next[d] = new TrieNode(); + node = node->next[d]; + } + } + + LL find(LL num) + { + TrieNode* node = root; + if (root->next[0]==NULL && root->next[1]==NULL) return 0; + LL ret = 0; + for (int i=63; i>=0; i--) + { + int d = ((num>>i)&1); + if (node->next[1-d]!=NULL) + { + ret += (1LL<next[1-d]; + } + else + { + ret += 0; + node = node->next[d]; + } + } + return ret; + } + + long long maxXor(int n, vector>& edges, vector& values) + { + this->values = values; + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + } + + root = new TrieNode(); + + dfs(0, -1); + + dfs2(0, -1); + + return ret; + } + + LL dfs(int cur, int parent) + { + LL v = values[cur]; + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + v = v + dfs(nxt, cur); + } + val[cur] = v; + return v; + } + + void dfs2(int cur, int parent) + { + for (int nxt: next[cur]) + { + if (nxt==parent) continue; + ret = max(ret, find(val[nxt])); + dfs2(nxt, cur); + 
insert(val[nxt]); + } + } +}; diff --git a/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees/Readme.md b/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees/Readme.md new file mode 100644 index 000000000..9a4d84b2f --- /dev/null +++ b/Trie/2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees/Readme.md @@ -0,0 +1,7 @@ +### 2479.Maximum-XOR-of-Two-Non-Overlapping-Subtrees + +显然我们可以通过一遍dfs求得每个节点对应的子树元素和。另外,对于一个数组pool,和一个固定的元素n,我们知道可以用`1707.Maximum-XOR-With-an-Element-From-Array`的算法,高效地得到n在pool里的最大XOR值。但是本题的难点在于,如何添加元素进入pool的顺序,以及求max XOR的顺序,因为我们需要排除两个重合子树被XOR的可能性。 + +本题的精彩解法是设计一个后序遍历的dfs。对于每个节点node,我们依次处理它的子节点v,先在pool里查询v可以得到的最大XOR值,然后递归处理v,最后将v加入pool。这样处理完所有的孩子节点,我们会发现,所有的query都不会发生在节点v与它的直系下级之间,只会发生在v与亲戚节点之间。至于node本身,我们不会在当前的dfs里处理,而是由其parent所在的dfs里进行query、递归处理和添加。再次注意这三者之间的顺序,这保证了query任何一个节点的时候,它的子树都没有被处理,子节点和本身都还没有被添加进pool里。 + +另外,因为所有的节点都是依次加入pool的,而且入pool之前都与pool内的元素query过一次最大XOR的寻找,所以这个方法不会遗漏任何配对。 diff --git a/Trie/2935.Maximum-Strong-Pair-XOR-II/2935.Maximum-Strong-Pair-XOR-II.cpp b/Trie/2935.Maximum-Strong-Pair-XOR-II/2935.Maximum-Strong-Pair-XOR-II.cpp new file mode 100644 index 000000000..68771f3e6 --- /dev/null +++ b/Trie/2935.Maximum-Strong-Pair-XOR-II/2935.Maximum-Strong-Pair-XOR-II.cpp @@ -0,0 +1,80 @@ +class Solution { + class TrieNode + { + public: + TrieNode* next[2]; + int count = 0; + TrieNode(){ + for (int i=0; i<2; i++) + next[i] = NULL; + } + }; + TrieNode* root; + +public: + int maximumStrongPairXor(vector& nums) + { + root = new TrieNode(); + + sort(nums.begin(), nums.end()); + int j = 0; + int ret = INT_MIN/2; + for (int i=0; i=0; k--) + { + int bit = ((num>>k)&1); + if (node->next[bit]==NULL) + node->next[bit] = new TrieNode(); + node = node->next[bit]; + node->count+=1; + } + } + + void remove(int num) + { + TrieNode* node = root; + for (int k=31; k>=0; k--) + { + int bit = ((num>>k)&1); + node = node->next[bit]; + node->count-=1; + } + } + + int dfs(int num, TrieNode* node, int k) + { + if (k==-1) return 0; + int bit = (num>>k)&1; + if (bit == 0) + { + if (node->next[1] && node->next[1]->count > 0) + return dfs(num, node->next[1], k-1) + (1<next[0] && node->next[0]->count > 0) + return dfs(num, node->next[0], k-1); + } + else + { + if (node->next[0] && node->next[0]->count > 0) + return dfs(num, node->next[0], k-1) + (1<next[1] && node->next[1]->count > 0) + return dfs(num, node->next[1], k-1); + } + + return INT_MIN/2; + } +}; diff --git a/Trie/2935.Maximum-Strong-Pair-XOR-II/Readme.md b/Trie/2935.Maximum-Strong-Pair-XOR-II/Readme.md new file mode 100644 index 000000000..78d055414 --- /dev/null +++ b/Trie/2935.Maximum-Strong-Pair-XOR-II/Readme.md @@ -0,0 +1,8 @@ +### 2935.Maximum-Strong-Pair-XOR-II + +观察`|x - y| <= min(x, y)`, 假设x是其中较大的那个,很容易推得`y<=x<=2y`。所以我们将nums从小到大排序之后,考察某个数作为y时,可以将一段滑窗内的元素[y,2y]加入一个集合,根据y在这个集合里找能与y异或得到最大值的那个元素。并且,我们发现随着y的移动,这个滑窗的移动也是单调的,说明每个式子进入集合与移出集合都只需要操作一次,时间复杂度是o(N). 
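+
+滑窗的维护可以用如下示意代码理解(add/remove/query是假设的集合接口,具体实现就是下文的Trie):
+```cpp
+sort(nums.begin(), nums.end());
+int j = 0, ret = 0;
+for (int i = 0; i < (int)nums.size(); i++)
+{
+    add(nums[i]);                                      // nums[i] 作为较大的 x,先放入集合(x^x=0,不影响答案)
+    while (nums[j] * 2 < nums[i]) remove(nums[j++]);   // 踢出不满足 x <= 2y 的过小元素
+    ret = max(ret, query(nums[i]));                    // 在窗口 [j,i] 内找与 nums[i] 异或最大的值
+}
+```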
+ +对于求最大XOR pair而言,这样的“集合”必然是用Trie。将符合y条件的数字加入Trie之后,从高到低遍历y的每个bit位:如果y的bit是1,那么我们就选择在Trie里向下走0的分支(如果存在的话),反之我们就在Trie里向下走1的分支。这样走到底之后,你选择的路径所对应的数字就是能与y异或得到最大的结果。 + +PS:在Trie里实时加入一条路径很简单,但是如何移除一条路径呢?显然不能盲目地删除该路径上的所有节点,因为它可能被其他路径共享。技巧是,给每个节点标记一个计数器。加入一条路径时,将沿路的节点的计数器加一;反之删除一个条路径时,将沿路的节点的计数器减一。如果某个节点的计数器为零,意味着该分支已经“虚拟地”从Trie里移出了,就不能再被访问了。 + diff --git a/Trie/2977.Minimum-Cost-to-Convert-String-II/2977.Minimum-Cost-to-Convert-String-II_v1.cpp b/Trie/2977.Minimum-Cost-to-Convert-String-II/2977.Minimum-Cost-to-Convert-String-II_v1.cpp new file mode 100644 index 000000000..f3b2e86c7 --- /dev/null +++ b/Trie/2977.Minimum-Cost-to-Convert-String-II/2977.Minimum-Cost-to-Convert-String-II_v1.cpp @@ -0,0 +1,66 @@ +using LL = long long; +class Solution { +public: + long long minimumCost(string source, string target, vector& original, vector& changed, vector& cost) + { + unordered_setSet; + Set.insert(original.begin(), original.end()); + Set.insert(changed.begin(), changed.end()); + unordered_mapMap; + int idx = 0; + for (string x: Set) + { + Map[x] = idx; + idx++; + } + + int n = Set.size(); + LL d[n][n]; + for (int i=0; idp(m+1); + dp[0] = 0; + + for (int i=1; i<=m; i++) + { + dp[i] = LLONG_MAX/2; + if (source[i]==target[i]) + dp[i] = dp[i-1]; + + string a; + string b; + for (int j=i; j>=1; j--) + { + a = source.substr(j,1) + a; + b = target.substr(j,1) + b; + + if (Map.find(a)!=Map.end() && Map.find(b)!=Map.end()) + dp[i] = min(dp[i], dp[j-1] + d[Map[a]][Map[b]]); + } + } + + if (dp[m]==LLONG_MAX/2) return -1; + + return dp[m]; + } +}; diff --git a/Trie/2977.Minimum-Cost-to-Convert-String-II/2977.Minimum-Cost-to-Convert-String-II_v2.cpp b/Trie/2977.Minimum-Cost-to-Convert-String-II/2977.Minimum-Cost-to-Convert-String-II_v2.cpp new file mode 100644 index 000000000..479d6b19c --- /dev/null +++ b/Trie/2977.Minimum-Cost-to-Convert-String-II/2977.Minimum-Cost-to-Convert-String-II_v2.cpp @@ -0,0 +1,102 @@ +using LL = long long; +class TrieNode +{ + public: + TrieNode* next[26]; + int idx; + TrieNode() + { + for (int i=0; i<26; i++) + next[i] = NULL; + idx = -1; + } +}; + +class Solution { + TrieNode* root = new TrieNode(); +public: + long long minimumCost(string source, string target, vector& original, vector& changed, vector& cost) + { + for (auto& s: original) + reverse(s.begin(), s.end()); + + for (auto& s: changed) + reverse(s.begin(), s.end()); + + unordered_setSet; + Set.insert(original.begin(), original.end()); + Set.insert(changed.begin(), changed.end()); + unordered_mapMap; + int idx = 0; + for (string word: Set) + { + Map[word] = idx; + + TrieNode* node = root; + for (char ch: word) + { + if (node->next[ch-'a']==NULL) + node->next[ch-'a'] = new TrieNode(); + node = node->next[ch-'a']; + } + node->idx = idx; + + idx++; + } + + int n = Set.size(); + LL d[n][n]; + for (int i=0; idp(m+1); + dp[0] = 0; + + for (int i=1; i<=m; i++) + { + dp[i] = LLONG_MAX/2; + if (source[i]==target[i]) + dp[i] = dp[i-1]; + + TrieNode* node1 = root; + TrieNode* node2 = root; + for (int j=i; j>=1; j--) + { + if (node1->next[source[j]-'a']==NULL) + break; + if (node2->next[target[j]-'a']==NULL) + break; + node1 = node1->next[source[j]-'a']; + node2 = node2->next[target[j]-'a']; + + int idx1 = node1->idx; + int idx2 = node2->idx; + if (idx1==-1 || idx2==-1) continue; + + dp[i] = min(dp[i], dp[j-1] + d[idx1][idx2]); + } + } + + if (dp[m]==LLONG_MAX/2) return -1; + + return dp[m]; + } +}; diff --git a/Trie/2977.Minimum-Cost-to-Convert-String-II/Readme.md b/Trie/2977.Minimum-Cost-to-Convert-String-II/Readme.md new file mode 
100644 index 000000000..558cd466e --- /dev/null +++ b/Trie/2977.Minimum-Cost-to-Convert-String-II/Readme.md @@ -0,0 +1,21 @@ +### 2977.Minimum-Cost-to-Convert-String-II + +本题和`2976. Minimum Cost to Convert String I`类似的思路,只不过2976里构造的最短路径图的顶点是“字母”,而本题里图的顶点是“字符串”。我们用Floyd可以容易求出“original->changed”里出现过任意两个字符串之间的最小代价,将字符串离散化后(用hash表记录每种字符串的序号),记录在数组d[][]里。 + +然后考虑从source到target的转化,很明显这是一个动态规划。对于前i个字符的前缀而言,成功转化的关键在于i所在的字符串转化是什么?我们需要找到一个位置j stringIndices(vector& wordsContainer, vector& wordsQuery) + { + vector>arr; + for (int i=0; i&a, const pair&b) + { + if (a.first.size() != b.first.size()) + return a.first.size() < b.first.size(); + else + return a.second < b.second; + }); + + for (int i=arr.size()-1; i>=0; i--) + { + TrieNode* node = root; + string s = arr[i].first; + for (int j=s.size()-1; j>=0; j--) + { + if (node->next[s[j]-'a']==NULL) + node->next[s[j]-'a'] = new TrieNode(); + node = node->next[s[j]-'a']; + node->idx = arr[i].second; + } + } + + root->idx = arr[0].second; + vectorrets; + for (auto& query: wordsQuery) + { + TrieNode* node = root; + int ans = -1; + for (int i=query.size()-1; i>=0; i--) + { + if (node->next[query[i]-'a']!=NULL) + node = node->next[query[i]-'a']; + else + { + ans = node->idx; + break; + } + } + if (ans==-1) + ans = node->idx; + + rets.push_back(ans); + + } + + return rets; + } +}; diff --git a/Trie/3093.Longest-Common-Suffix-Queries/Readme.md b/Trie/3093.Longest-Common-Suffix-Queries/Readme.md new file mode 100644 index 000000000..1f07cba79 --- /dev/null +++ b/Trie/3093.Longest-Common-Suffix-Queries/Readme.md @@ -0,0 +1,9 @@ +### 3093.Longest-Common-Suffix-Queries + +在一堆字符串里面高效地寻找一个字符串(或它的前缀/后缀),显然我们会使用字典树的数据结构。 + +本题里面,我们在字典树里游走时,对于所处的节点,它可能被多个wordsContainer里面的字符串共享。那么对于每个节点,它究竟属于哪个字符串呢?根据题意,我们的选择依据是:先看总长度更小、再看出现的序号更小。因此,我们需要依据这个规则,给每个节点标记idx这个属性。所以对wordsQuery的某个后缀在字典树里游走完之后,它停留的节点的idx就是答案。 + +如果高效地给字典树的每个节点赋值idx呢?其实很简单,我们先将总长度更大的字符串加入字典树,再将总长度更小的字符串加入字典树。每次加入字符串时,idx的值就按加入字符串的index来。这样我们就发现,字符串长度小的自动会override每个节点的idx属性。同理,我们将wordsContainer序号更大的字符串先加入字典树,再将序号更小的字符串后加入字典树,这样每个节点的idx就会更新为相对更小的wordsContainer index。 + +综上,我们只需要将wordsContainer排序,按照“先看总长度更小、再看出现的序号更小”的原则排序,然后反序,按照后缀加入字典树。然后将wordsQuery的每个字符串按后缀在字典树里游走,最终停留在哪个节点,该节点的idx属性就是答案。 diff --git a/Trie/3632.Subarrays-with-XOR-at-Least-K/3632.Subarrays-with-XOR-at-Least-K.cpp b/Trie/3632.Subarrays-with-XOR-at-Least-K/3632.Subarrays-with-XOR-at-Least-K.cpp new file mode 100644 index 000000000..3ae7f66b6 --- /dev/null +++ b/Trie/3632.Subarrays-with-XOR-at-Least-K/3632.Subarrays-with-XOR-at-Least-K.cpp @@ -0,0 +1,58 @@ +class TrieNode { + public: + TrieNode* next[2]; + int count; + TrieNode() + { + for (int i=0; i<2; i++) + next[i]=NULL; + count = 0; + } +}; +class Solution { + void add(int x) { + TrieNode* node = root; + for (int i=0; i<31; i++) { + int b = ((x>>(30-i))&1); + if (!node->next[b]) + node->next[b] = new TrieNode(); + node = node->next[b]; + node->count += 1; + } + } + TrieNode* root; +public: + long long countXorSubarrays(vector& nums, int k) { + root = new TrieNode(); + int n = nums.size(); + + long long ret = 0; + int sum = 0; + add(sum); + for (int i=0; i>(30-j))&1); + int bitS = ((sum>>(30-j))&1); + int b = bitK^bitS; + if (bitK==0 && node->next[1-b]) { + ret += node->next[1-b]->count; + } + if (node->next[b]) + node = node->next[b]; + else { + flag = 0; + break; + } + } + if (flag) + ret += node->count; + + add(sum); + } + + return ret; + } +}; diff --git a/Trie/3632.Subarrays-with-XOR-at-Least-K/Readme.md b/Trie/3632.Subarrays-with-XOR-at-Least-K/Readme.md new file mode 100644 index 
000000000..213c6e79f --- /dev/null +++ b/Trie/3632.Subarrays-with-XOR-at-Least-K/Readme.md @@ -0,0 +1,8 @@ +### 3632.Subarrays-with-XOR-at-Least-K + +我们考虑这样的操作:将所有前缀的XOR_SUM放入一个字典树。这个字典树有31层,对应这整型数字的31个bit。每个节点有一个count属性,记录该节点的子树有个多少叶子节点(即当前有多少个前缀的XOR_SUM经过这个节点)。 + +我们逐个考察每个前缀:假设[0:i]的前缀异或值转化为二进制数组是bitsS,k转化为二进制数组是bitsK。那么我们从高到低考察每个bit位j。易知始终令`b=bitsS[j]^bitsK[j]`,意味着如果某个前缀异或值的第j位是b的话,那么该前缀与[0:i]前缀之间的subarray的异或值到目前为止恰好紧贴着K。依次类推,我们可以在字典树里从高往低遍历直至叶子节点,判断是有多少个前缀恰好与[0:i]分割出的子区间异或值为k。当然也有可能这条路径不存在,意味着没有这样的前缀。 + +除此之外,在上述的字典树的从跟到叶子节点的路径上,如果在某一层`bitsK[j]=0`,那么意味着如果该位置我们不选择上述的`b=bitsS[j]^bitsK[j]`,而是走另一个分支`1-b`,那么会导致由此访问得到的所有前缀、与[0:i]前缀之间的subarray的异或值在第j位上会变成了1,从而大于预期的`bitsK[j]=0`。于是从该分支到其下所有叶子节点的路径也都应该算入符合条件的计数中去。实际中,对于这种路径,我们直接对结果加上`node->next[1-b]->count`即可,而不用真的去递归遍历到底。 + diff --git a/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits.cpp b/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits.cpp new file mode 100644 index 000000000..793fee837 --- /dev/null +++ b/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits.cpp @@ -0,0 +1,60 @@ +class TrieNode { + public: + TrieNode* next[2]; + TrieNode() + { + for (int i=0; i<2; i++) + next[i]=NULL; + } +}; + +class Solution { + TrieNode* root; +public: + void add(int x) { + TrieNode* node = root; + for (int i=0; i<31; i++) { + int b = ((x>>(30-i))&1); + if (!node->next[b]) + node->next[b] = new TrieNode(); + node = node->next[b]; + } + } + + int dfs(TrieNode* node, int i, int x) { + if (node==NULL) { + return -1; + } + if (i==31) return 0; + + int b = ((x>>(30-i))&1); + if (b==1) { + int ans = dfs(node->next[0], i+1, x); + if (ans!=-1) return ans; + else return -1; + } + else { + int ans1 = dfs(node->next[1], i+1, x); + if (ans1!=-1) + return (1<<(30-i))+ans1; + int ans2 = dfs(node->next[0], i+1, x); + if (ans2!=-1) + return ans2; + return -1; + } + } + + long long maxProduct(vector& nums) { + root = new TrieNode(); + int n = nums.size(); + + long long ret = 0; + for (int x: nums) { + int ans = dfs(root, 0, x); + if (ans!=-1) ret = max(ret, (long long)x*ans); + add(x); + } + + return ret; + } +}; diff --git a/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits_v2.cpp b/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits_v2.cpp new file mode 100644 index 000000000..ec2565411 --- /dev/null +++ b/Trie/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits/3670.Maximum-Product-of-Two-Integers-With-No-Common-Bits_v2.cpp @@ -0,0 +1,18 @@ +class Solution { +public: + long long maxProduct(vector& nums) { + int max_n = *max_element(begin(nums), end(nums)); + int k = log2(max_n)+1; + vector dp(1<next[ch-'a']==NULL) - node->next[ch-'a']=new TrieNode(); + if (node->next[ch-'a']==NULL) + return 0; node=node->next[ch-'a']; } - int SUM=0; - DFS(node,SUM); - return SUM; + + int Sum=0; + DFS(node,Sum); + return Sum; } - void DFS(TrieNode* node, int & SUM) + void DFS(TrieNode* node, int& sum) { if (node==NULL) return; - SUM+=node->val; + sum+=node->val; for (int i=0; i<26; i++) - DFS(node->next[i],SUM); + DFS(node->next[i],sum); } }; - -/** - * Your MapSum object will be instantiated and called as such: - * MapSum obj = new MapSum(); - * obj.insert(key,val); - * int param_2 = obj.sum(prefix); - */ diff --git 
a/Trie/677.Map-Sum-Pairs/Readme.md b/Trie/677.Map-Sum-Pairs/Readme.md index 2d3e70511..2e388dfd0 100644 --- a/Trie/677.Map-Sum-Pairs/Readme.md +++ b/Trie/677.Map-Sum-Pairs/Readme.md @@ -2,7 +2,4 @@ 常规的Trie操作。 -注意,在类的公告区域里定义了```TrieNode* root```之后,在构造函数里就只需要直接```root=TrieNode()```即可,千万不能```TrieNode* root=TrieNode()```。否则会有意想不到的错误。 - - -[Leetcode Link](https://leetcode.com/problems/map-sum-pairs) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/map-sum-pairs) diff --git a/Two_Pointers/015.3Sum/Readme.md b/Two_Pointers/015.3Sum/Readme.md index 39aa5b10f..ce9ffe746 100644 --- a/Two_Pointers/015.3Sum/Readme.md +++ b/Two_Pointers/015.3Sum/Readme.md @@ -13,7 +13,7 @@ while (leftMap; int i=0; @@ -22,7 +24,8 @@ class Solution { if (Len>j-i+1) { Len = j-i+1; - result = s.substr(i,Len); + ret_start = i; + ret_len = Len; } Map[s[i]]--; if (Map[s[i]]==Table[s[i]]-1) @@ -30,6 +33,6 @@ class Solution { i++; } } - return result; + return s.substr(ret_start, ret_len); } }; diff --git a/Two_Pointers/076.Minimum-Window-Substring/Readme.md b/Two_Pointers/076.Minimum-Window-Substring/Readme.md index 9b9ae4856..f358ed641 100644 --- a/Two_Pointers/076.Minimum-Window-Substring/Readme.md +++ b/Two_Pointers/076.Minimum-Window-Substring/Readme.md @@ -4,9 +4,9 @@ 对于每个新加入的元素s[j],首先更新该字符出现次数的Map[s[i]]++。如果更新后,Map[s[i]]等于需要出现的次数Table[s[i]],则计数器count++,说明有一个字符满足了出现次数的要求. -当count等于t中的字符类型数COUNT时,说明任务已经实现。此时,让左指针不断右移,相应的Map[s[i]]就要自减,一旦Map[s[i] < Table[s[i]],则count需要自减1从而不再满足COUNT,说明需要继续加入新元素才能满足任务. 从而j才可以右移继续遍历。 +当count等于t中的字符类型数COUNT时,说明任务已经实现。此时,让左指针不断右移,相应的Map[s[i]]就要自减,一旦`Map[s[i]] < Table[s[i]]`,则count需要自减1从而不再满足COUNT,说明需要继续加入新元素才能满足任务. 从而j才可以右移继续遍历。 在这个过程中如果满足条件count==COUNT,都需要不断更新和记录结果。 -[Leetcode Link](https://leetcode.com/problems/minimum-window-substring) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/minimum-window-substring) diff --git a/Two_Pointers/1004.Max-Consecutive-Ones-III/Readme.md b/Two_Pointers/1004.Max-Consecutive-Ones-III/Readme.md index 7b60669cb..bca68a0ce 100644 --- a/Two_Pointers/1004.Max-Consecutive-Ones-III/Readme.md +++ b/Two_Pointers/1004.Max-Consecutive-Ones-III/Readme.md @@ -9,8 +9,8 @@ 以上方法的两层循环的时间复杂度是o(NK),显然会超时。 #### 解法2:双指针 -对于任何求subarray的问题,我们通常的做法就是固定左边界,探索右边界。假设我们固定左边界是i,那么要使右边界j最远,需要满足[i,j]最多有K个0。 +对于任何求subarray的问题,我们通常的做法就是固定左边界,探索右边界。假设我们固定左边界是`i`,那么要使右边界`j`最远,需要满足[i,j]最多有K个0。我们只需要将`j`单调右移,同时记录中间遇到了几个0即可。 -此时我们考虑左边界是i+1的情况。如果A[i+1]==1,那么此时[i+1,j]内的需要翻转元素的个数count依然是K,然而右边界j依然不能往右突破。我们只有不停地移动i,直到A[i]==0的时候,意味着第i个元素的不被允许翻转,所以区间内的翻转次数count-=1,因此右边界就又可以移动,直到找到下一个A[j]==0为止(此时count再次变为K)。 +综上,我们用for循环遍历左边界`i`:对于每个`i`我们记录移动右指针`j`,将`j`停在count为K的最远位置。然后左移一个`i`并更新count(如果A[i]原本是0的话,我们要吐出一个flip的名额),再接着移动`j`的位置。 所以两个指针都只会朝一个方向移动。这是快慢类型的双指针,时间复杂度就是o(N). 
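+
+下面给出解法2的一个最小实现示意(这里让右指针做外层循环、左指针按需收缩,与上述描述等价;函数签名按本题常见形式假设):
+```cpp
+int longestOnes(vector<int>& A, int K)
+{
+    int i = 0, zeros = 0, ret = 0;
+    for (int j = 0; j < (int)A.size(); j++)
+    {
+        if (A[j] == 0) zeros++;          // 把 A[j] 纳入窗口
+        while (zeros > K)                // 窗口内 0 超过 K,收缩左边界
+        {
+            if (A[i] == 0) zeros--;
+            i++;
+        }
+        ret = max(ret, j - i + 1);       // [i, j] 内最多 K 个 0
+    }
+    return ret;
+}
+```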
diff --git a/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters/1358.Number-of-Substrings-Containing-All-Three-Characters.cpp b/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters/1358.Number-of-Substrings-Containing-All-Three-Characters.cpp new file mode 100644 index 000000000..8f44c1324 --- /dev/null +++ b/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters/1358.Number-of-Substrings-Containing-All-Three-Characters.cpp @@ -0,0 +1,27 @@ +class Solution { +public: + int numberOfSubstrings(string s) + { + int j = 0; + int count1 = 0, count2 = 0, count3 = 0; + int ret = 0; + for (int i=0; i 0) + ret += s.size()-j+1; + + if (s[i]=='a') count1--; + else if (s[i]=='b') count2--; + else if (s[i]=='c') count3--; + } + + return ret; + } +}; diff --git a/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters/Readme.md b/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters/Readme.md new file mode 100644 index 000000000..927942064 --- /dev/null +++ b/Two_Pointers/1358.Number-of-Substrings-Containing-All-Three-Characters/Readme.md @@ -0,0 +1,7 @@ +### 1358.Number-of-Substrings-Containing-All-Three-Characters + +我们固定滑窗的左端点i,向右探索右端点j。当我们发现移动到某处的j,使得[i:j]恰好至少包含a,b,c各一个的时候,那么意味着右端点其实可以直至在从j到n-1的任何位置,都满足条件。这样的区间有n-j个。 + +此时我们查看下一个i作为左端点,同样为了满足[i:j]恰好至少包含a,b,c各一个,j必然向右移动。同理,可以计算出以i为左端点、符合条件的区间的个数。 + +最终答案就是以每个i作为左端点时,符合条件的右端点的数目的总和。 diff --git a/Two_Pointers/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted.cpp b/Two_Pointers/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted.cpp index da41641b1..2620b4ab4 100644 --- a/Two_Pointers/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted.cpp +++ b/Two_Pointers/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted/1574.Shortest-Subarray-to-be-Removed-to-Make-Array-Sorted.cpp @@ -8,7 +8,7 @@ class Solution { int j = n-1; while (j-1>=0 && arr[j-1]<=arr[j]) j--; - ret = min(ret, j); + ret = j; if (ret==0) return 0; for (int i=0; i& nums, int k) + { + sort(nums.begin(), nums.end()); + int n = nums.size(); + nums.insert(nums.begin(), 0); + vectorpresum(n+1); + for (int i=1; i<=n; i++) + presum[i] = presum[i-1]+nums[i]; + + int i=1; + int ret = 0; + for (int j=1; j<=n; j++) + { + while (!isOK(nums, presum, i, j, k)) + i++; + ret = max(ret, j-i+1); + } + return ret; + } + + bool isOK(vector&nums, vector&presum, int i, int j, int k) + { + LL detla = (LL)nums[j]*(j-i+1) - (presum[j] - presum[i-1]); + return detla <= k; + } +}; diff --git a/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/1838.Frequency-of-the-Most-Frequent-Element.cpp b/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/1838.Frequency-of-the-Most-Frequent-Element_v2.cpp similarity index 100% rename from Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/1838.Frequency-of-the-Most-Frequent-Element.cpp rename to Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/1838.Frequency-of-the-Most-Frequent-Element_v2.cpp diff --git a/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/Readme.md b/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/Readme.md index 2e582f017..ccc19819d 100644 --- a/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/Readme.md +++ b/Two_Pointers/1838.Frequency-of-the-Most-Frequent-Element/Readme.md @@ -1,8 +1,14 @@ ### 
1838.Frequency-of-the-Most-Frequent-Element +#### 解法1: 首先需要明确,我们操作后最终得到的最高频率元素一定是数组中既有的元素。为什么?假设你可以通过操作,得到一个最高频率的元素是x,且x在原数组中没有出现过;那么你必然可以通过更少的一些操作,使得原数组里恰好比x小的元素y,也构造出相同的频次。因此我们不妨将nums按从小到大排序。 -那么这个最高频次的元素是什么呢?显然不一定是数组里既有的最高频次元素。我们必须逐个尝试一遍。假设我们通过不超过k次的操作,使nums[i]的频次最高,那么这些操作必然是作用在紧邻i前面的若干元素,使它们变成nums[i]。我们假设操作的范围是[j:i-1],需要的实际操作数就是```count = sum{nums[i]-nums[k]}, k=j,j+1, ... i-1``` +那么这个最高频次的元素是什么呢?显然不一定是数组里既有的最高频次元素。我们必须逐个尝试一遍。假设我们通过不超过k次的操作,使nums[i]的频次最高,那么这些操作必然是作用在紧邻i前面的若干元素,使它们都变成nums[i]。我们假设操作的范围是[j:i]。那么我们将这段区间内的数字都变成nums[i],所需要的操作就是`nums[i]*(i-j+1) - sum[j:i]`. 显然,如果我们实现准备好前缀和数组的话,那么这个操作数就可以o(1)求出。如果操作数大于k,那么我们必须将j右移减小区间,才有可能符合条件。 + +由此可以见,我们只要唯一个滑窗。对于每个i作为区间的右端点,我们不断移动左指针j使得区间[j:i]恰好符合要求,于是j-i+1就是将nums[i]为最高频次元素的次数。 + +#### 解法2: +接下来解释一种不需要presum的滑窗解法。同上,对于区间[j:i]需要的实际操作数,我们也可以写成```count = sum{nums[i]-nums[k]}, k=j,j+1, ... i-1```。 接下来我们考虑如果最终最高频次的元素是nums[i+1],那么我们如何高效地转移?假设需要操作的数的范围起点不变,即[j:i],那么总操作数的增量就是```count += (nums[i+1]-nums[i])*(i+1-j)```,也就是我们将nums[j:i-1]都变成nums[i]的基础上,再将这(i+1-j)个数都提升一个gap,变成nums[i+1]。此时如果count>k了,那么我们就要优先舍弃最前面的元素j,那么节省的操作数就是nums[i+1]-nums[j]。我们可能会舍弃若干个老元素并右移j,直至是的count<=k,那么此时我们就在题目的限制条件下,可以将nums[j:i]都变成了nums[i+1],即频次就是```i+1-j+1```. diff --git a/Two_Pointers/2354.Number-of-Excellent-Pairs/2354.Number-of-Excellent-Pairs.cpp b/Two_Pointers/2354.Number-of-Excellent-Pairs/2354.Number-of-Excellent-Pairs.cpp new file mode 100644 index 000000000..d08d9c3dc --- /dev/null +++ b/Two_Pointers/2354.Number-of-Excellent-Pairs/2354.Number-of-Excellent-Pairs.cpp @@ -0,0 +1,38 @@ +using LL = long long; +class Solution { +public: + long long countExcellentPairs(vector& nums, int k) + { + vectorarr; + unordered_setSet(nums.begin(), nums.end()); + + LL ret = 0; + for (auto x: Set) + { + arr.push_back(__builtin_popcount(x)); + } + + sort(arr.begin(), arr.end()); + + LL n = arr.size(); + LL j = n-1; + for (int i=0; i=0 && arr[i]+arr[j]>=k) + j--; + if (j>=i) + ret += n-(j+1); + else + ret += n-(i+1); + } + ret *= 2; + + for (auto x: arr) + { + if (x*2>=k) + ret++; + } + + return ret; + } +}; diff --git a/Two_Pointers/2354.Number-of-Excellent-Pairs/Readme.md b/Two_Pointers/2354.Number-of-Excellent-Pairs/Readme.md new file mode 100644 index 000000000..948869f51 --- /dev/null +++ b/Two_Pointers/2354.Number-of-Excellent-Pairs/Readme.md @@ -0,0 +1,15 @@ +### 2354.Number-of-Excellent-Pairs + +因为涉及的都是位操作,我们不妨枚举一下对于单个bit位上可能出现的情况: +``` +a b a&b + a|b +1 1 1 + 1 = 2 +1 0 0 + 1 = 1 +0 1 0 + 1 = 1 +0 0 0 + 0 = 0 +``` +我们不难发现,无论a是0还是1,与b配对后,能得到的bit 1的数目的增量,完全取决于b本身是0还是1. 我们将这个结论从单个bit扩展到整数时依然成立,就是无论整数A是如何,与整数B配对后,bit 1的总数目的增量,就是B含有的bit 1的数目。也就是说,任意两个整数A与B配对后,得到的bit 1的总数目,就是他们各自bit 1的数目相加。 + +所以我们只需要将去重后的nums里面的每个元素,转化为其bit 1的数目,得到一个新的数组arr。那么本题的本质就是问在arr里面有多少个pair,使得其元素和大于等于k即可。显然,将arr排序之后,用双指针即可线性地找到所有pairs。注意,因为允许调换顺序,pair的数目要乘以2加入结果. 
+ +此外,本题还允许pair里面的元素相同。所以我们还要单独考察arr里面的每一个元素,查看它自身组成一个pair的话是否符合题意。 diff --git a/Two_Pointers/2401.Longest-Nice-Subarray/2401.Longest-Nice-Subarray.cpp b/Two_Pointers/2401.Longest-Nice-Subarray/2401.Longest-Nice-Subarray.cpp new file mode 100644 index 000000000..3e5e26a51 --- /dev/null +++ b/Two_Pointers/2401.Longest-Nice-Subarray/2401.Longest-Nice-Subarray.cpp @@ -0,0 +1,20 @@ +class Solution { +public: + int longestNiceSubarray(vector& nums) + { + int count = 0; + int j = 0; + int ret = 0; + for (int i=0; i smallestSubarrays(vector& nums) + { + int n = nums.size(); + int j = n-1; + vectorrets(n); + vectorcount(32); + for (int i=n-1; i>=0; i--) + { + for (int k=0; k<32; k++) + count[k] += ((nums[i]>>k)&1); + + + while (j>i && isOK(nums[j], count)) + { + for (int k=0; k<32; k++) + count[k] -= ((nums[j]>>k)&1); + j--; + } + + rets[i] = j-i+1; + } + return rets; + + } + + bool isOK(int num, vector&count) + { + for (int k=0; k<32; k++) + { + if (count[k] > 0 && (count[k] - ((num>>k)&1) <= 0)) + return false; + } + return true; + } +}; diff --git a/Two_Pointers/2411.Smallest-Subarrays-With-Maximum-Bitwise-OR/Readme.md b/Two_Pointers/2411.Smallest-Subarrays-With-Maximum-Bitwise-OR/Readme.md new file mode 100644 index 000000000..ebac0080d --- /dev/null +++ b/Two_Pointers/2411.Smallest-Subarrays-With-Maximum-Bitwise-OR/Readme.md @@ -0,0 +1,19 @@ +### 2411.Smallest-Subarrays-With-Maximum-Bitwise-OR + +我们设想,对于i而言,如果将这个区间的右边界j设置为n-1,那么必然能够得到Maximum Bitwise OR,记做OrSum. 我们该如何减小这个区间但是又不影响OrSum的值呢?最基本的想法就是,先尝试看看去掉nums[j],答案还是OrSum吗?我们如何验证呢,是将[i,j-1]区间内的所有元素再做一遍OR操作吗?显然这个效率太低了。 + +我们这样思考:如果去掉nums[j]对于OrSum没有影响,说明对于nums[j]里某个是1的bit位而言,nums[i:j-1]里必然已经有至少一个元素在该bit位上是1了。 这就提示我们需要统计一下nums[i:j]里面在每个bit位上的1的总个数。举个例子:如果对于某个bit位,区间[i:j]里面有三个1,且nums[j]本身就是1,那么如果把j刨除这个区间后,OR[i:j-1]在这个bit位上不受影响。再比如,如果对于某个bit位,区间[i:j]里面只有一个1,且nums[j]本身就是1,那么如果把j刨除这个区间后,OrSum[i:j-1]在这个bit位上就是0了,显然就不会是maximum Bitwise OR了,所以我们不能将j排除。 + +所以如果我们已知一个区间[i:j]能够得到关于i的maximum bitwise or,并且有如上的count计数器(记录32个bit位里的1的个数),那么能否缩小右区间的范围j,就看 +```cpp +for (int k=0; k<32; k++) +{ + if (count[k]==1 && ((nums[j]>>k)&1) { + return false; + } +} +return true; +``` +根据以上的check函数,我们就可以一路缩小j,直至得到关于i的最小的subarray。 + +那么此时我们想得到关于i-1的答案,是否需要重新将j放回n-1的位置,重复以上的步骤呢?其实不必。因为我们已知[j+1:n-1]的各个位置上的1bit都完全与[i:j]区间重复了。因为[i:j]的存在,[j+1:n-1]这部分区间对于i-1而言也是没有用的。所以当我们考察i-1的答案时,不需要重置j,直接继承i对应的区间右端点即可。 diff --git a/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome.cpp b/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome.cpp new file mode 100644 index 000000000..a2b94b71a --- /dev/null +++ b/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome.cpp @@ -0,0 +1,35 @@ +using LL = long long; +class Solution { +public: + int minimumOperations(vector& nums) + { + int i = 0, j = nums.size()-1; + LL left = nums[i], right = nums[j]; + int count = 0; + + while (i right) + { + j--; + right += nums[j]; + count++; + } + } + return count; + + } +}; diff --git a/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome/Readme.md b/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome/Readme.md new file mode 100644 index 000000000..7ff199e20 --- /dev/null +++ b/Two_Pointers/2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome/Readme.md @@ -0,0 +1,7 @@ +### 2422.Merge-Operations-to-Turn-Array-Into-a-Palindrome + 
+我们可以设想,最终得到的回文串的最左边和最右边的元素是怎么得到的?一定是来自nums最左侧的若干个元素之和,与nums最右侧的若干个元素之和。考虑到所有的元素都是正数,我们可以用双指针的方法来得到two equal sum。也就是说,初始令`left=nums[0]`和`right=nums[n-1]`,如果发现`leftright`,必然只能将右指针左移才能试图让left与right相等。于是在得到`left==right`之前,两侧指针移动的总次数就是merge的次数。此时,说明我们找到了最终回文串的最外层的一对。剥离掉这最外层后,我们可以重复上述的过程。 + +此外,我们必须考虑到,有可能在左右指针相遇之前,都无法满足`left==right`。这意味着什么呢?说明只有一个方案,即将整个数组都归并在一起,成为回文串的中心。 + +总结:所以本题就是一个双指针,不停地调整左右指针,试图使得前缀和等于后缀和。如此一轮一轮地确定外层的每一对。如果最终指针相遇,意味着该轮的所有元素必须都归并在一起。 diff --git a/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K.cpp b/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K.cpp new file mode 100644 index 000000000..296089a64 --- /dev/null +++ b/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K.cpp @@ -0,0 +1,31 @@ +using LL = long long; +class Solution { +public: + long long maximumSubarraySum(vector& nums, int k) + { + LL ret = 0; + unordered_mapMap; + int count = 0; + LL sum = 0; + for (int i=0; i=k-1) + { + if (count == k) + ret = max(ret, sum); + + Map[nums[i-k+1]]--; + sum -= nums[i-k+1]; + if (Map[nums[i-k+1]]==0) + count--; + } + } + + return ret; + } +}; diff --git a/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K/Readme.md b/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K/Readme.md new file mode 100644 index 000000000..01afc5d50 --- /dev/null +++ b/Two_Pointers/2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K/Readme.md @@ -0,0 +1,17 @@ +### 2461.Maximum-Sum-of-Distinct-Subarrays-With-Length-K + +非常普通的固定长度的滑窗。 + +用一个HashMap来记录每个number出现的次数。用count来表示滑窗内的distinct number的数量。count的变动依据如下: +1. 当新加入一个数字时 +```cpp +Map[num]++; +if (Map[num]==1) + count++; +``` +2. 
当移出一个数字时 +```cpp +Map[num]--; +if (Map[num]==0) + count--; +``` diff --git a/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right/2516.Take-K-of-Each-Character-From-Left-and-Right_v1.cpp b/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right/2516.Take-K-of-Each-Character-From-Left-and-Right_v1.cpp new file mode 100644 index 000000000..2c90505a2 --- /dev/null +++ b/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right/2516.Take-K-of-Each-Character-From-Left-and-Right_v1.cpp @@ -0,0 +1,56 @@ +class Solution { +public: + int takeCharacters(string s, int k) + { + int n = s.size(); + int a = 0, b = 0, c = 0; + for (int i=0; i=len) + { + if (s[i-len]=='a') a--; + else if (s[i-len]=='b') b--; + else if (s[i-len]=='c') c--; + } + + if (i>=len-1) + { + if (a<=A && b<=B && c<=C) + return true; + } + } + return false; + } +}; diff --git a/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right/2516.Take-K-of-Each-Character-From-Left-and-Right_v2.cpp b/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right/2516.Take-K-of-Each-Character-From-Left-and-Right_v2.cpp new file mode 100644 index 000000000..b5ce392ec --- /dev/null +++ b/Two_Pointers/2516.Take-K-of-Each-Character-From-Left-and-Right/2516.Take-K-of-Each-Character-From-Left-and-Right_v2.cpp @@ -0,0 +1,42 @@ +class Solution { +public: + int takeCharacters(string s, int k) + { + + int A = 0, B = 0, C = 0; + for (auto ch: s) + { + if (ch=='a') A++; + else if (ch=='b') B++; + else if (ch=='c') C++; + } + if (A& nums, int k) + { + int n = nums.size(); + int j = 0; + unordered_mapcount; + + for (int i=0; i= k) + ret += n-j+1; + + total += diff(count, nums[i], -1); + count[nums[i]]--; + } + + return ret; + } + + LL diff(unordered_map&count, int num, int d) + { + LL m = count[num]; + LL old = m*(m-1)/2; + m += d; + LL now = m*(m-1)/2; + return now - old; + } +}; diff --git a/Two_Pointers/2537.Count-the-Number-of-Good-Subarrays/Readme.md b/Two_Pointers/2537.Count-the-Number-of-Good-Subarrays/Readme.md new file mode 100644 index 000000000..0d018d7e5 --- /dev/null +++ b/Two_Pointers/2537.Count-the-Number-of-Good-Subarrays/Readme.md @@ -0,0 +1,5 @@ +### 2537.Count-the-Number-of-Good-Subarrays + +pairs的计算取决于每种数字出现的频次。如果一个subarray里某个数值出现了m次,那么它就能贡献`m/(m-1)/2`个pairs. 
很明显,窗口越大,就能够得到越多的pairs。 + +因此我们遍历每个元素i作为窗口的左端点,然后探索右端点的位置j使得窗口内恰好能有k对pairs,那么意味着右端点从j到n-1都是可行的,故有n-j个以i为左端点的合法滑窗。每一个回合,向右移动i一位,j必然是单调右移的。故双指针o(n)时间可解。 diff --git a/Two_Pointers/2564.Substring-XOR-Queries/2564.Substring-XOR-Queries.cpp b/Two_Pointers/2564.Substring-XOR-Queries/2564.Substring-XOR-Queries.cpp new file mode 100644 index 000000000..366d3d737 --- /dev/null +++ b/Two_Pointers/2564.Substring-XOR-Queries/2564.Substring-XOR-Queries.cpp @@ -0,0 +1,41 @@ +using LL = long long; +class Solution { +public: + vector> substringXorQueries(string s, vector>& queries) + { + unordered_map>Map; + for (int i=0; i>rets(m); + for (int i=0; i=len) + sum -= (1LL<i && s[j]==s[j-1]) < 2)) + { + count += (j>i && s[j]==s[j-1]); + j++; + } + ret = max(ret, j-i); + + if (i+1 countServers(int n, vector>& logs, int x, vector& queries) + { + for (int i=0; i>q; + for (int i=0; irets(q.size()); + unordered_mapMap; + int i = 0; + int j = 0; + for (auto qq: q) + { + int t = qq[0], idx = qq[1]; + while (j& nums, int k) + { + unordered_map>Map; + for (int i=0; iSet(s.begin(), s.end()); + for (int T = 1; T <= Set.size(); T++) + { + int len = T * k; + vectorfreq(26,0); + int j = 0; + for (int i=0; i+len-1& freq, int k) + { + for (int x: freq) + { + if (x != k && x != 0) + return false; + } + return true; + } +}; diff --git a/Two_Pointers/2953.Count-Complete-Substrings/Readme.md b/Two_Pointers/2953.Count-Complete-Substrings/Readme.md new file mode 100644 index 000000000..19d1b846e --- /dev/null +++ b/Two_Pointers/2953.Count-Complete-Substrings/Readme.md @@ -0,0 +1,5 @@ +### 2953.Count-Complete-Substrings + +很显然,第一步是将原字符串切割成若干个区间,我们只考虑那些“相邻字符大小不超过2”的那些区间. + +接下来,我们需要计算每个通过初筛区间里,再有多少个符合条件的substring,即要求substring里出现的字符的频次都是k。我们注意到字符的种类只有26种,如果只出现一种字符,那长度就是k;如果出现两种字符,那长度就是2k,以此类推,我们发现可以遍历出现字符的种类数目,然后用一个固定长度的滑窗来判定是否存在substring符合条件。滑窗的运动过程中,只要维护一个hash表即可。 diff --git a/Two_Pointers/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency.cpp b/Two_Pointers/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency.cpp new file mode 100644 index 000000000..04a8e4f61 --- /dev/null +++ b/Two_Pointers/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency/2958.Length-of-Longest-Subarray-With-at-Most-K-Frequency.cpp @@ -0,0 +1,26 @@ +class Solution { +public: + int maxSubarrayLength(vector& nums, int k) + { + int n = nums.size(); + unordered_mapcount; + int j = 0; + int ret = 0; + for (int i=0; i& nums) + { + LL n = nums.size(); + LL total = n*(n-1)/2+n; + LL half = (total+1)/2; + int left = 1, right = n; + while (left < right) + { + int mid = left + (right-left)/2; + if (atMostK(nums, mid)>=half) + right = mid; + else + left = mid+1; + } + return left; + } + + LL atMostK(vector& A, int K) + { + unordered_mapMap; + LL count=0; + LL i = 0; + + for (LL j=0; jK) + { + Map[A[i]]--; + if (Map[A[i]]==0) + Map.erase(A[i]); + i++; + } + count+= j-i+1; + } + return count; + } +}; diff --git a/Two_Pointers/3134.Find-the-Median-of-the-Uniqueness-Array/Readme.md b/Two_Pointers/3134.Find-the-Median-of-the-Uniqueness-Array/Readme.md new file mode 100644 index 000000000..205179dc2 --- /dev/null +++ b/Two_Pointers/3134.Find-the-Median-of-the-Uniqueness-Array/Readme.md @@ -0,0 +1,7 @@ +### 3134.Find-the-Median-of-the-Uniqueness-Array + +数组的subarray总数有`N = n(n-1)/2+n`个,直接求所有subarray的中位数肯定不现实。此题几乎肯定需要用二分搜值来解。 + +假设一个数字K,怎么判定它是否是所有subarray的distinct number的中位数,也就是第N/2个呢?显然,我们只需要判定`subarray with at most K distinct 
number`的个数是否有N/2个即可。有的话,我们就往小调整,否则就往大调整。 + +求“subarray with at most K distinct number”个数,可以用滑动窗口来解决。类似于340,992. diff --git a/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones/3234.Count-the-Number-of-Substrings-With-Dominant-Ones.cpp b/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones/3234.Count-the-Number-of-Substrings-With-Dominant-Ones.cpp new file mode 100644 index 000000000..1366613b4 --- /dev/null +++ b/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones/3234.Count-the-Number-of-Substrings-With-Dominant-Ones.cpp @@ -0,0 +1,53 @@ +class Solution { +public: + int numberOfSubstrings(string s) + { + int n = s.size(); + + vectorright = computeRightArray(s); + int ret = 0; + for (int m = 1; m <= 200; m++) + { + int j = 0, count = 0; + for (int i=0; i= count*count) + { + int extra = right[j-1] - max(0, count*count-have); + ret += max(extra+1, 0); + } + + count -= (s[i]=='0'); + } + } + + for (int i=0; i computeRightArray(const std::string& s) + { + int n = s.mgth(); + std::vector right(n, 0); + + for (int i = n-2; i >=0; i--) { + if (s[i + 1] == '1') { + right[i] = right[i + 1] + 1; + } + } + + return right; + } +}; diff --git a/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones/Readme.md b/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones/Readme.md new file mode 100644 index 000000000..d8f3da598 --- /dev/null +++ b/Two_Pointers/3234.Count-the-Number-of-Substrings-With-Dominant-Ones/Readme.md @@ -0,0 +1,13 @@ +### 3234.Count-the-Number-of-Substrings-With-Dominant-Ones + +对于substring,我们第一个想法是考虑前缀之差。假设以i为结尾的前缀里有x个1和y个0,那么我们希望找一个已有的前缀位置j(其包含p个1和q个0),需要满足`(x-p)>=(y-q)^2`. 由于这其中包含了平方关系,很难构造hash找出符合条件的j。 + +这是我们再审查这个平方关系。一般情况下,这意味着substring里的1要比0多很多。比如说有10个一,那么就需要有100个零。考虑到s的总长度也不过是4e4,这就意味着其实我们寻找的字符串里最多也就200个零,再配上40000个一就到达极限了。 + +所以此时我们的方案几乎就呼之欲出了,那就是穷举包含零的个数为1到200的substring。我们遍历长度m=1,...,200,对于每个固定的m,通过一遍单调的双指针移动就可以把所有包含零的个数是m的滑窗都找出来。时间复杂度是o(200n),符合题意。 + +假设找到一段滑窗,里面包含了m个零,那么该如何计数以它为基础的符合条件的substring呢?一种想法是,假设确定了最外边的两个0的位置i和j,那么我们可以自由调配i左边的1的个数、以及j右边的1的个数,但要使得区间内的0(个数已经固定)与1的个数符合条件。这样的方法比较复杂。 + +其实有一种更简单的做法,那么穷举substring的左边界i(不管是0还是1),通过上述的滑窗准则,找到对应右边界的j使得[i:j]恰好有m个0,另外有t个1. 那么我们可以知道,至少还需要`m*m-t`个1,这就需要从s[j]右边的若干个连续的1里面取。假设s[j]右边有连续k个1,那么超过`m*m-t`的部分我们可以自由选择,故有`k-(m*m-t)+1`种合法的substring右边界。 + +另外,对于m=0的特殊情况我们要单独处理。即substring里只含有1不含有0. 那么任意以1为左边界的substring,它的右边界可以包含任意数目的连续的1. 
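To make the m = 0 special case above concrete, a small hedged sketch (the helper name `countAllOnesSubstrings` is my own and not part of the solution file): counting the substrings that contain only 1s reduces to summing, for every position, the length of the run of consecutive 1s ending there.

```cpp
// Counts the substrings of s consisting solely of '1' characters (the m = 0 case).
long long countAllOnesSubstrings(const string& s) {
    long long ret = 0, run = 0;
    for (char ch : s) {
        run = (ch == '1') ? run + 1 : 0;  // length of the run of 1s ending at this position
        ret += run;                       // each such run contributes `run` new all-ones substrings
    }
    return ret;
}
```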
diff --git a/Two_Pointers/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II_v1.cpp b/Two_Pointers/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II_v1.cpp new file mode 100644 index 000000000..7113126aa --- /dev/null +++ b/Two_Pointers/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II/3298.Count-Substrings-That-Can-Be-Rearranged-to-Contain-a-String-II_v1.cpp @@ -0,0 +1,36 @@ +using LL = long long; +class Solution { +public: + long long validSubstringCount(string word1, string word2) + { + vectortarget(26); + for (auto ch: word2) + target[ch-'a']++; + + vectorcount(26); + int j = 0; + LL ret = 0; + + int n = word1.size(); + for (int i=0; i&count, vector&target) + { + for (int i=0; i<26; i++) + if (count[i]target(26); + for (auto ch: word2) + target[ch-'a']++; + int T = 0; + for (int i=0; i<26; i++) + if (target[i]!=0) T++; + + vectorcount(26); + int j = 0; + LL ret = 0; + + int t = 0; + int n = word1.size(); + for (int i=0; iSet({'a','e','i','o','u'}); + unordered_mapMap; + + vectorconsecutive(n); + int c = 0; + for (int i=n-1; i>=0; i--) + { + if (Set.find(word[i])==Set.end()) + c = 0; + else + c++; + consecutive[i] = c; + } + + long long ret = 0; + int j = 0; + for (int i=0; i=k`. 此时两种情况: +1. 如果count1>k,那么我们必然会尝试右移左端点i,因为再右移右端点j的话必然不会满足count1. +2. 如果count1==k,那么我们就找到了一组合法区间[i:j]。那么我们是否还有其他以i为左端点的合法区间呢?事实上,如果j右边有连续的元音出现的话,这些都是可以纳入合法区间的。所以我们可以提前计算一个数组A[k],记录k及k右边连续有多少个元音。这样答案就增加了`1+A[j+1]`个。 + +以上就考虑完了所有以i为左端点的情况。下一步就是将i右移一步,更新count0和count1,然后继续探索右端点即可。 diff --git a/Two_Pointers/3634.Minimum-Removals-to-Balance-Array/3634.Minimum-Removals-to-Balance-Array.cpp b/Two_Pointers/3634.Minimum-Removals-to-Balance-Array/3634.Minimum-Removals-to-Balance-Array.cpp new file mode 100644 index 000000000..bcb70c85f --- /dev/null +++ b/Two_Pointers/3634.Minimum-Removals-to-Balance-Array/3634.Minimum-Removals-to-Balance-Array.cpp @@ -0,0 +1,19 @@ +using ll = long long; +class Solution { +public: + int minRemoval(vector& nums, int k) { + sort(nums.begin(), nums.end()); + int n = nums.size(); + int j = 0; + int ret = n; + for (int i=0; inums[i]*k`,这就意味着必须将从j到n-1的元素都删除才能符合要求。 + +假设我们删除最小值nums[0]但保留次小值nums[1],那么我们发现为了保证最大元素与最小元素的ratio不超过k,那么j的极限位置可以单调地右移。确定了新的j之后,我们发现此时需要删除1+n-j个元素,其中1个是左边删除的小数,n-j个是右边删除的大数。 + +所以我们可以顺次移动左指针i,相应地单调移动右指针j,直至恰好`j==n || nums[j]>nums[i]*k`。此时两种情况: +1. 如果`j==n`,说明ratio还没超过k,右端不需要删除任何数字。只需要删除左边的i个数字即可。 +2. 
如果`nums[j]>nums[i]*k`,说明右端需要删除n-j个数字。左边需要删除i个数字。 + +遍历所有的i之后,取全局最小值。 + diff --git a/Two_Pointers/3641.Longest-Semi-Repeating-Subarray/3641.Longest-Semi-Repeating-Subarray.cpp b/Two_Pointers/3641.Longest-Semi-Repeating-Subarray/3641.Longest-Semi-Repeating-Subarray.cpp new file mode 100644 index 000000000..d35ea3ef1 --- /dev/null +++ b/Two_Pointers/3641.Longest-Semi-Repeating-Subarray/3641.Longest-Semi-Repeating-Subarray.cpp @@ -0,0 +1,25 @@ +class Solution { +public: + int longestSubarray(vector& nums, int k) { + unordered_mapMap; + int count = 0; + int n = nums.size(); + int j = -1; + int ret = 0; + for (int i=0; ik) + ret = max(ret, j - i); + else + ret = max(ret, j - i + 1); + Map[nums[i]]--; + if (Map[nums[i]]==1) count--; + } + return ret; + + } +}; diff --git a/Two_Pointers/3641.Longest-Semi-Repeating-Subarray/Readme.md b/Two_Pointers/3641.Longest-Semi-Repeating-Subarray/Readme.md new file mode 100644 index 000000000..af285715f --- /dev/null +++ b/Two_Pointers/3641.Longest-Semi-Repeating-Subarray/Readme.md @@ -0,0 +1,7 @@ +### 3641.Longest-Semi-Repeating-Subarray + +非常明显的双指针,但是滑窗右边界j的控制需要格外小心。 + +对于任意的左边界i,当满足`while (j+1k,那么意味着j超出了合法的范围,合法subarray的长度最多是`j-i`. +2. 如果count<=k,那么意味着j已经到了最后一个元素(即n-1)但是仍未超出合法范围,故此时合法subarray的长度是`j-i+1`. diff --git a/Two_Pointers/992.Subarrays-with-K-Different-Integers/992.Subarrays-with-K-Different-Integers_v2.cpp b/Two_Pointers/992.Subarrays-with-K-Different-Integers/992.Subarrays-with-K-Different-Integers_v2.cpp deleted file mode 100644 index 66a2d8827..000000000 --- a/Two_Pointers/992.Subarrays-with-K-Different-Integers/992.Subarrays-with-K-Different-Integers_v2.cpp +++ /dev/null @@ -1,25 +0,0 @@ -class Solution { -public: - int lengthOfLongestSubstringKDistinct(string s, int k) - { - vectorfreq(256,0); - int count = 0; - int i = 0; - int ret = 0; - for (int j=0; jk) - { - freq[s[i]]--; - if (freq[s[i]]==0) - count--; - i++; - } - ret = max(ret, j-i+1); - } - return ret; - } -}; diff --git a/Union_Find/128.Longest-Consecutive-Sequence/128.Longest-Consecutive-Sequence_v2.cpp b/Union_Find/128.Longest-Consecutive-Sequence/128.Longest-Consecutive-Sequence_v2.cpp index 2c9414131..599b8c589 100644 --- a/Union_Find/128.Longest-Consecutive-Sequence/128.Longest-Consecutive-Sequence_v2.cpp +++ b/Union_Find/128.Longest-Consecutive-Sequence/128.Longest-Consecutive-Sequence_v2.cpp @@ -1,22 +1,25 @@ class Solution { -public: - int longestConsecutive(vector& nums) - { - unordered_setSet; - for (auto a:nums) - Set.insert(a); - - int result = 0; - for (int i=0; i& nums) { + unordered_set Set; + for (auto a : nums) + Set.insert(a); + + int result = 0; + unordered_set Visited; + for (int i = 0; i < nums.size(); i++) { + if (Set.find(nums[i] - 1) != Set.end() || + Visited.find(nums[i]) != Visited.end()) + continue; + Visited.insert(nums[i]); + int j = nums[i] + 1; + while (Set.find(j) != Set.end()) { + Visited.insert(j); + j++; + } + + result = max(result, j - nums[i]); + } + return result; } - return result; - } -}; + }; \ No newline at end of file diff --git a/Union_Find/1631.Path-With-Minimum-Effort/Readme.md b/Union_Find/1631.Path-With-Minimum-Effort/Readme.md index c9565cf93..f71a1fdd4 100644 --- a/Union_Find/1631.Path-With-Minimum-Effort/Readme.md +++ b/Union_Find/1631.Path-With-Minimum-Effort/Readme.md @@ -7,3 +7,5 @@ #### 解法2:Union Find 我们将所有的边按照diff从小到大排个序。优先选最小的边,看能联通哪些点;再选次小的边,看能联通哪些点。直至选中某条边之后,发现起点和终点恰好联通,那么这条边的diff就是答案。 + +注:此题和[1102](https://github.com/wisdompeak/LeetCode/tree/master/Binary_Search/1102.Path-With-Maximum-Minimum-Value)非常相似。 diff --git 
a/Union_Find/2421.Number-of-Good-Paths/2421.Number-of-Good-Paths_v1.cpp b/Union_Find/2421.Number-of-Good-Paths/2421.Number-of-Good-Paths_v1.cpp new file mode 100644 index 000000000..d0b268d3f --- /dev/null +++ b/Union_Find/2421.Number-of-Good-Paths/2421.Number-of-Good-Paths_v1.cpp @@ -0,0 +1,65 @@ +class Solution { + vectorFather; + int FindFather(int x) + { + if (Father[x]!=x) + Father[x] = FindFather(Father[x]); + return Father[x]; + } + + void Union(int x, int y) + { + x = Father[x]; + y = Father[y]; + if (x>e [100005]; + + int numberOfGoodPaths(vector& vals, vector>& edges) + { + int n = vals.size(); + Father.resize(n); + for (int i=0; i>val2idx; + for (int i=0; iValSet(vals.begin(), vals.end()); + for (int v: ValSet) + { + for (auto& [v,x]: e[v]) + { + if (FindFather(v)!=FindFather(x)) + Union(v,x); + } + + unordered_mapcount; + for (auto idx: val2idx[v]) + { + int root = FindFather(idx); + count[root]++; + } + + for (auto& [v, num]: count) + ret += num*(num-1)/2; + } + + return ret + n; + + } +}; diff --git a/Union_Find/2421.Number-of-Good-Paths/2421.Number-of-Good-Paths_v2.cpp b/Union_Find/2421.Number-of-Good-Paths/2421.Number-of-Good-Paths_v2.cpp new file mode 100644 index 000000000..041500aaa --- /dev/null +++ b/Union_Find/2421.Number-of-Good-Paths/2421.Number-of-Good-Paths_v2.cpp @@ -0,0 +1,48 @@ +class Solution { + vectornext [100005]; + int ans = 0; + int n; + vector vals; +public: + int numberOfGoodPaths(vector& vals, vector>& edges) + { + this->vals = vals; + n = vals.size(); + for (auto& edge: edges) + { + next[edge[0]].push_back(edge[1]); + next[edge[1]].push_back(edge[0]); + } + dfs(0,-1); + return ans + n; + } + + mapdfs(int cur, int parent) + { + mapcount; + count[vals[cur]] += 1; + + for (int child: next[cur]) + { + if (child == parent) continue; + map tmp = dfs(child, cur); + + auto iter = tmp.lower_bound(vals[cur]); + tmp.erase(tmp.begin(), iter); + + if (tmp.size() > count.size()) + swap(tmp, count); + + for (auto& [val, frq]: tmp) + { + if (count.find(val)!=count.end()) + ans += frq * count[val]; + } + + for (auto& [val, frq]: tmp) + count[val] += frq; + } + + return count; + } +}; diff --git a/Union_Find/2421.Number-of-Good-Paths/Readme.md b/Union_Find/2421.Number-of-Good-Paths/Readme.md new file mode 100644 index 000000000..acd7f9803 --- /dev/null +++ b/Union_Find/2421.Number-of-Good-Paths/Readme.md @@ -0,0 +1,23 @@ +### 2421.Number-of-Good-Paths + +#### 方法1:并查集 +我们看到元素个数是`3e4`,并不是特别好判断对于时间复杂度的要求。但是注意到数值大小的范围是1e5,这就提醒我们可以从数值大小出发。我们不妨尝试从小到大分析这些节点。 + +首先考虑数值为0的节点。多数情况下,它们应该都是离散的点。但是有些时候一些0节点是彼此联通的,那么在这个联通区间内,任意两个节点都是一条合法的path(因为经过的任意节点都是0)。 + +此时我们再考虑数值为1的节点。因为我们此时仅考虑数值是0或1的节点,我们仅限引入那些以1为一个端点、以0或1为另一个端点的边。加入这些边后,那么就可以拓展上面的联通区域。并且我们可以发现,在每个联通区域内,任意两个以1节点为端点的path都是合法的。我们只需要知道此时这些1节点在各自联通区域内的分布数量,就可以算出有多少个这样的path。 + +接下来我们在考虑数值为2的节点。类似地,我们引入所有端点数值不超过2的边,进一步扩展上述的图,有些区域会被进一步联通。在每个联通区域内,任意两个以2节点为端点的path都是合法的。 + +可见,这道题的算法就是并查集,从小到大地引入节点(和相应的边),构建联通关系。 + +为了操作上的方便,我们将所有的edge按照较大的端点分类。比如说e[2],表示所有以(数值)2节点为一端,以0/1/2节点为另一端的边的集合。这样方便我们按顺序往图里加入边。 + +#### 方法2:DFS +本题其实也可以按照普通的DFS来做。我们任意以某个节点(比如说0号节点)为根,看做一棵rooted tree. 
类似于“path in a tree”的思想,对于每个节点node而言,我们想知道以它为turning point的合法path有多少(每个path必然有一个turning point和两条单链)。以此做不重不漏的统计。 + +考虑对于node作为turning point的合法路径。假设其端点数值是x(显然x必须大于等于vals[node]),我们只需要知道每个子树里有多少条以x为端点、node为终点的合法单链(该单链里任何元素都不大于x)。将这些不同子树的这些单链之间彼此组合即可。 + +更具体地,对于每个node,我们设计一个数据结构`map count`,其中key表示子树里节点的数值,val表示该数值的节点(可能有多个)到node的合法单链的数目(合法的意思是沿途的节点数值都小于key)。显然,为了保证单链的合法性,我们只需要在map里保存其中key值大于等于vals[node]的部分。对于node的每个孩子的递归结果`map tmp`,我们最终都要将其merge到node的count里去。在merge前,对于tmp里的每一个key,都有`ans+=tmp[key]*count[key]`;在merge后,对于tmp里的每一个key,都有`count[key]+=tmp[key]`. + +此处有一个非常重要的优化技巧。我们在合并tmp与count的时候,遍历的是tmp里面的key。但是如果我们发现count.size()比tmp小的时候,可以先swap(tmp,count),以减少需要遍历的key的数目。 diff --git a/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities/2492.Minimum-Score-of-a-Path-Between-Two-Cities.cpp b/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities/2492.Minimum-Score-of-a-Path-Between-Two-Cities.cpp new file mode 100644 index 000000000..0b325afa4 --- /dev/null +++ b/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities/2492.Minimum-Score-of-a-Path-Between-Two-Cities.cpp @@ -0,0 +1,41 @@ +class Solution { + int Father[100005]; + int FindFather(int x) + { + if (Father[x]!=x) + Father[x] = FindFather(Father[x]); + return Father[x]; + } + + void Union(int x, int y) + { + x = Father[x]; + y = Father[y]; + if (x>& roads) + { + for (int i=1; i<=n; i++) + Father[i] = i; + + for (auto road: roads) + { + int a = road[0], b = road[1], d = road[2]; + if (FindFather(a)!=FindFather(b)) + Union(a,b); + } + + int ret = INT_MAX; + for (auto road: roads) + { + int a = road[0], b = road[1], d = road[2]; + if (FindFather(a)==FindFather(1)) + ret = min(ret, d); + } + + return ret; + } +}; diff --git a/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities/Readme.md b/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities/Readme.md new file mode 100644 index 000000000..4ba9c11ce --- /dev/null +++ b/Union_Find/2492.Minimum-Score-of-a-Path-Between-Two-Cities/Readme.md @@ -0,0 +1,5 @@ +### 2492.Minimum-Score-of-a-Path-Between-Two-Cities + +注意题意,一条路径允许重复访问边和节点。因此城市1与N之间的score本质,就是这两个节点所在连通图里最短的边。 + +所以我们用Union Find将所有的节点标记联通之后,只要再遍历一遍所有的边,找到最短的边、同时两个端点都是与1(或者N)联通的。 diff --git a/Union_Find/2709.Greatest-Common-Divisor-Traversal/2709.Greatest-Common-Divisor-Traversal.cpp b/Union_Find/2709.Greatest-Common-Divisor-Traversal/2709.Greatest-Common-Divisor-Traversal.cpp new file mode 100644 index 000000000..496887207 --- /dev/null +++ b/Union_Find/2709.Greatest-Common-Divisor-Traversal/2709.Greatest-Common-Divisor-Traversal.cpp @@ -0,0 +1,89 @@ +class Solution { +public: + int Father[2*100005]; + + + int FindFather(int x) + { + if (Father[x]!=x) + Father[x] = FindFather(Father[x]); + return Father[x]; + } + + void Union(int x, int y) + { + x = Father[x]; + y = Father[y]; + if (x>y) Father[y] = x; + else Father[x] = y; + } + + vectorEratosthenes(int n) + { + vectorq(n+1,0); + vectorprimes; + for (int i=2; i<=sqrt(n); i++) + { + if (q[i]==1) continue; + int j=i*2; + while (j<=n) + { + q[j]=1; + j+=i; + } + } + for (int i=2; i<=n; i++) + { + if (q[i]==0) + primes.push_back(i); + } + return primes; + } + + bool canTraverseAllPairs(vector& nums) + { + int MX = *max_element(nums.begin(), nums.end()); + vectorprimes = Eratosthenes(MX); + int M = primes.size(); + + int N = nums.size(); + unordered_mapidx; + for (int j=0; j x) break; + if (p*p > x) + { + if (FindFather(i)!=FindFather(N+idx[x])) + Union(i, N+idx[x]); + break; + } + + if (x%p==0) + { + if (FindFather(i)!=FindFather(N+j)) + Union(i, N+j); + while (x%p==0) + x 
/= p; + } + } + } + + for (int i=0; i next[100005]; + unordered_setprimes; + LL global = 0; +public: + int FindFather(int x) + { + if (Father[x]!=x) + Father[x] = FindFather(Father[x]); + return Father[x]; + } + + void Union(int x, int y) + { + x = Father[x]; + y = Father[y]; + if (xEratosthenes(int n) + { + vectorq(n+1,0); + unordered_setprimes; + for (int i=2; i<=sqrt(n); i++) + { + if (q[i]==1) continue; + int j=i*2; + while (j<=n) + { + q[j]=1; + j+=i; + } + } + for (int i=2; i<=n; i++) + { + if (q[i]==0) + primes.insert(i); + } + return primes; + } + + bool isPrime(int x) + { + return primes.find(x)!=primes.end(); + } + + long long countPaths(int n, vector>& edges) + { + primes = Eratosthenes(n); + + for (int i=1; i<=n; i++) + Father[i] = i; + + for (auto& edge: edges) + { + int a = edge[0], b = edge[1]; + next[a].push_back(b); + next[b].push_back(a); + if (!isPrime(a) && !isPrime(b)) + { + if (FindFather(a)!=FindFather(b)) + Union(a,b); + } + } + + unordered_mapMap; + for (int i=1; i<=n; i++) + Map[FindFather(i)]+=1; + + for (int p: primes) + { + vectorarr; + for (int nxt: next[p]) + { + if (!isPrime(nxt)) + arr.push_back(Map[FindFather(nxt)]); + } + LL total = accumulate(arr.begin(), arr.end(), 0LL); + LL sum = 0; + for (LL x: arr) + sum += x*(total-x); + global += sum/2 + total; + } + + return global; + } + +}; diff --git a/Union_Find/2867.Count-Valid-Paths-in-a-Tree/Readme.md b/Union_Find/2867.Count-Valid-Paths-in-a-Tree/Readme.md new file mode 100644 index 000000000..4ea6c5780 --- /dev/null +++ b/Union_Find/2867.Count-Valid-Paths-in-a-Tree/Readme.md @@ -0,0 +1,16 @@ +### 2867.Count-Valid-Paths-in-a-Tree + +很显然,因为需要计数的path只有一个prime,我们必然count paths by prime. + +我们考虑每个是质数的节点P,考虑经过它的有效路径。显然,一个最显著的pattern就是:从P某个联通的合数节点(不需要紧邻但是不能被其他质数隔开)开始,经过P,再到P的另一个联通的合数。 + +假设A有M个紧邻的合数节点(显然不会关注紧邻的质数节点),这些合数节点又各自分别于若干个合数节点联通,记这些联通区域里分别有m1,m2,m3...个联通的合数节点。显然,从m1里的任何一个节点,到除m1里的任意节点,都是合法路径。令`m1+m2+m3+...=total`,则有 +```cpp +for (int i=1; i<=M; i++) + count += m_i * (total - m_i); +``` +但是注意,以上的count对于起点、终点互换的路径是重复计算了,所以最终有效的是count/2条路径。 + +另外,有效路径的第二个pattern,就是以P为起点,终点是任意与P联通的合数节点,这样的路径恰好就是total条。 + +最终的答案就是对于每个P,累加`count/2+total`. diff --git a/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades/3600.Maximize-Spanning-Tree-Stability-with-Upgrades.cpp b/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades/3600.Maximize-Spanning-Tree-Stability-with-Upgrades.cpp new file mode 100644 index 000000000..e530244e8 --- /dev/null +++ b/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades/3600.Maximize-Spanning-Tree-Stability-with-Upgrades.cpp @@ -0,0 +1,75 @@ +class Solution { +public: + struct DSU { + vector p, r; + DSU(int n): p(n,-1), r(n,0) { + iota(p.begin(), p.end(), 0); + } + int find(int x) { + return p[x]==x ? 
x : p[x]=find(p[x]); + } + bool unite(int a, int b) { + a = find(a); b = find(b); + if (a == b) return false; + if (r[a] < r[b]) swap(a,b); + p[b] = a; + if (r[a]==r[b]) ++r[a]; + return true; + } + }; + + bool isOK(int T, int n, vector>& edges, int k) { + DSU dsu(n); + int count = 0; + int upgrade = 0; + + vector>candidates; // upgrade, u,v + for (auto& e: edges) { + int u = e[0], v = e[1], s = e[2], must = e[3]; + if (must) { + if (s=T) + candidates.push_back({0, u, v}); + else if (2*s>=T) + candidates.push_back({1, u, v}); + } + } + + sort(candidates.begin(), candidates.end()); + + for (auto& cand: candidates) { + int u = cand[1], v = cand[2]; + if (count==n-1) break; + if (dsu.find(u)!=dsu.find(v)) { + dsu.unite(u,v); + count++; + upgrade += cand[0]; + if (upgrade > k) return false; + } + } + + for (int i=0; i>& edges, int k) { + int lo = 1, hi = 1e5*2; + while (lo < hi) { + int mid = hi-(hi-lo)/2; + if (isOK(mid, n, edges, k)) + lo = mid; + else + hi = mid-1; + } + if (isOK(lo, n, edges, k)) + return lo; + else + return -1; + } +}; diff --git a/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades/Readme.md b/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades/Readme.md new file mode 100644 index 000000000..2955612c2 --- /dev/null +++ b/Union_Find/3600.Maximize-Spanning-Tree-Stability-with-Upgrades/Readme.md @@ -0,0 +1,7 @@ +### 3600.Maximize-Spanning-Tree-Stability-with-Upgrades + +突破口是二分搜值。如果猜测全局最小的stability是T的话,尝试能否用最小生成树的方法构造出一棵树来。 + +当固定全局最小的stability是T之后,那么哪些edge能用哪些不能用就一目了然了。对于must的那些边,如果存在stability小于T的,那么显然无解。对于非must的边,如果`2*stabilityRoot; +public: + int minMalwareSpread(vector>& graph, vector& initial) + { + int N = graph.size(); + for (int i=0; i>Children; + for (int i=0; iSet(initial.begin(),initial.end()); + + int MaxGroup = 0; + int result; + for (auto a:Children) + { + int count = 0; + int candidate; + for (auto b:a.second) + { + if (Set.find(b)!=Set.end()) + { + count++; + candidate = b; + } + if (count>1) break; + } + if (count==1 && (a.second.size()>MaxGroup || a.second.size()==MaxGroup && candidateprimes = makePrimes(sqrt(100000)); for (auto p:primes) Root[p] = p; @@ -67,4 +67,4 @@ ``` -[Leetcode Link](https://leetcode.com/problems/largest-component-size-by-common-factor) \ No newline at end of file +[Leetcode Link](https://leetcode.com/problems/largest-component-size-by-common-factor)
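To illustrate the binary-search-on-the-answer framing used in the 3600.Maximize-Spanning-Tree-Stability-with-Upgrades write-up above, here is a hedged skeleton. The names are illustrative only; `feasible` stands in for the Kruskal-style union-find check that the write-up describes.

```cpp
// Returns the largest T in [lo, hi] with feasible(T) == true, or -1 if none exists.
// Assumes feasible is monotone: once it turns false it stays false as T grows.
template <class F>
int maximizeAnswer(int lo, int hi, F feasible) {
    if (!feasible(lo)) return -1;
    while (lo < hi) {
        int mid = lo + (hi - lo + 1) / 2;  // round up so lo always advances
        if (feasible(mid)) lo = mid;       // mid is achievable: try something larger
        else hi = mid - 1;                 // mid is too ambitious
    }
    return lo;
}
```
The 3600 solution file above follows the same pattern, with `isOK(T, ...)` playing the role of `feasible`.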