linuxchain
This is the C++ implementation.
I think we can simply find the longest contiguous subarray
whose sum is no greater than max, using a sliding window.
// Length of the longest contiguous run of arr whose sum is <= maxSum,
// found with a two-pointer sliding window ([p1, p2] is the current window).
// NOTE(review): the shrink step assumes elements are non-negative — with
// negative values a shrinking window can make the sum grow; confirm callers.
// Returns 0 when no element fits (or the array is empty).
int longestSubseqMaxSum(const vector<int> &arr, int maxSum)
{
    // Original indexed arr[0] unconditionally and computed arr.size() - 1 on
    // an unsigned type: both are broken for an empty array.
    if (arr.empty())
        return 0;
    const int n = static_cast<int>(arr.size());
    int p1 = 0, p2 = 0;
    int S = arr[0];                      // sum of the current window
    int maxLen = (S <= maxSum) ? 1 : 0;
    while (p2 < n - 1) {
        if (S <= maxSum)
            S += arr[++p2];              // room left: grow the window right
        else
            S -= arr[p1++];              // over budget: shrink from the left
        if (S <= maxSum && p2 - p1 + 1 > maxLen)
            maxLen = p2 - p1 + 1;
    }
    return maxLen;
}
This is the test result
arr = {1, 4, 7, }, max = 5
result = 2
arr = {3, 1, 2, 3, }, max = 4
result = 2
arr = {3, 1, 1, 2, 5, 1, 2, 1, 2, }, max = 10
result = 5
Below is the implementation in C++
#include <iostream>
#include <vector>
#include <string>
#include <algorithm>
#include <locale>
using namespace std;
enum StrType { GOOD, BAD, MIXED };
// True for lowercase ASCII letters only; the caller lowercases input first.
bool isChar(char c)
{
    return c >= 'a' && c <= 'z';
}
// True for the five lowercase vowels; anything else is treated as non-vowel.
bool isVowel(char c)
{
    switch (c) {
    case 'a': case 'e': case 'i': case 'o': case 'u':
        return true;
    default:
        return false;
    }
}
// Classify str (case-insensitively): BAD if it definitely contains 3+
// consecutive vowels or 5+ consecutive consonants; MIXED if that can only
// happen for some interpretation of the '?' wildcards; GOOD otherwise.
StrType checkStrType(string str)
{
// vcnt/ccnt: lengths of the current *definite* vowel/consonant runs.
// vcnt_mixed/ccnt_mixed: run lengths achievable if every '?' seen so far is
// resolved as a vowel (resp. consonant) in the worst case.
int vcnt = 0, vcnt_mixed = 0;
int ccnt = 0, ccnt_mixed = 0;
bool mixed = false;
// Normalize to lowercase so isChar/isVowel only need to handle 'a'..'z'.
for_each(str.begin(), str.end(), [](char &x){x = tolower(x);});
for(int i = 0; i < str.size(); i++){
if(isChar(str[i])){
if(isVowel(str[i])){
vcnt++;
// Extend the speculative vowel run only if a '?' already opened one.
if(vcnt_mixed > 0)
vcnt_mixed++;
// A definite vowel breaks any consonant run, speculative or not.
ccnt = ccnt_mixed = 0;
}
else {
ccnt++;
if(ccnt_mixed > 0)
ccnt_mixed++;
vcnt = vcnt_mixed = 0;
}
}
else if(str[i] == '?'){
// '?' may act as either kind: it extends both speculative runs by the
// preceding definite run plus itself, and resets the definite counters.
vcnt_mixed += vcnt + 1;
ccnt_mixed += ccnt + 1;
vcnt = ccnt = 0;
}
// NOTE(review): characters other than 'a'..'z' and '?' reset nothing —
// presumably the alphabet is restricted to those; confirm with callers.
if(vcnt >= 3 || ccnt >= 5)
return BAD;
if(vcnt_mixed >= 3 || ccnt_mixed >= 5)
mixed = true;
}
return mixed ? MIXED : GOOD;
}
Here are some test results.
abc : GOOD
abcdefghjkl : BAD
abcd?eg : GOOD
abcd?eij : MIXED
abcd?keij : MIXED
abcd??eij : MIXED
kbd??ej : MIXED
kbd?e? : MIXED
?aa : MIXED
: GOOD
jk : GOOD
Here is my code in C++.
Starting from the most significant digit,
count the number of 2s that appear at each digit position over the range [0, n].
using ulong = unsigned long long;
// Count how many times the digit 2 appears among all numbers in [0, n].
// Classic digit-DP-by-position: for position p (power-of-ten index `digit`),
// the prefix above contributes full cycles, and the digit at p contributes a
// partial cycle depending on whether it is <2, ==2 or >2.
ulong count2Range(ulong n)
{
    const string N = to_string(n);
    const int len = static_cast<int>(N.size());
    ulong cnt = 0;
    for (int pos = 0; pos < len; pos++) {
        const int digit = len - 1 - pos;   // power-of-ten index of this position
        // Integer 10^digit. The original used pow(), whose double result
        // loses precision for large exponents.
        ulong p10 = 1;
        for (int k = 0; k < digit; k++)
            p10 *= 10;
        if (pos != 0) {
            // Full cycles contributed by the prefix N[0, pos). The original
            // parsed with atoi(), which overflows past INT_MAX.
            const ulong prefix = stoull(N.substr(0, pos));
            cnt += prefix * p10;
        }
        const int d = N[pos] - '0';
        if (d >= 3) {
            // This position passed 2 completely: one extra full block of 10^digit.
            cnt += p10;
        }
        else if (d == 2) {
            // This position is sitting on 2: the suffix (plus the number
            // ending in ...2 followed by zeros) counts once each.
            cnt += 1;
            if (pos != len - 1)
                cnt += stoull(N.substr(pos + 1));
        }
    }
    return cnt;
}
O(n) solution provided.
Keep track of an index range [start, end] and, in a hash map, the counts of surplus integers
within the range.
A negative count value means the corresponding integer is
absent (or insufficient) in the current range.
The tail (end) is advanced while some required integer is still missing (i.e., has a negative count),
while the head (start) is advanced when the range already contains every integer of the sub-array
(i.e., all count values are non-negative).
The variable numNeed makes it easy to decide which pointer to move.
Assumed any integer (>= 10 or < 0) can be included in the array and
same integer can appear in the sub-array multiple times.
// Minimum-window search: the smallest index range [first, second] of arr that
// contains every element of sub with multiplicity.
// Returns the sentinel (-1, arr.size()) when no such window exists.
// O(n) time, O(distinct values) space.
// Parameters are now taken by const reference — the original copied both
// vectors on every call; call sites are unchanged.
pair<int,int> smallestSubsubay(const vector<int> &arr, const vector<int> &sub)
{
    unordered_map<int,int> need;   // negative count == still missing from window
    int numNeed = 0;               // how many array slots are still missing
    const int n = static_cast<int>(arr.size());
    pair<int,int> minRange = make_pair(-1, n);   // sentinel: width n + 1
    for (int v : sub) {
        if (--need[v] == -1)
            numNeed++;             // first deficit recorded for this value
    }
    int start = 0;
    int end = -1;
    while (start < n) {
        if (numNeed > 0) {
            // Something is missing: grow the window to the right.
            if (++end >= n)
                break;             // tail exhausted; no further window possible
            if (++need[arr[end]] == 0)
                numNeed--;
        }
        else {
            // Window is complete: try shrinking from the left.
            if (--need[arr[start]] == -1)
                numNeed++;
            start++;
        }
        if (numNeed == 0 &&
            end - start < minRange.second - minRange.first) {
            minRange = make_pair(start, end);
        }
    }
    return minRange;
}
Backtracking algorithm (worst case exponential without memoization).
Assumed that each word can be used no more than once (by the definition of the permutation).
// Try to segment str[pos..] into words from dict, each word usable at most
// once. On success, a space is inserted before position pos (for pos > 0)
// while the recursion unwinds, so str ends up with the chosen words
// space-separated. Returns true iff a full segmentation exists.
bool strPermuteFrom(unordered_set<string> &dict, string &str, int pos)
{
// Consumed the whole string: segmentation succeeded.
if(pos >= str.size())
return true;
// No words left to place but characters remain.
if(dict.size() == 0)
return false;
for(int j = pos; j < str.size(); j++){
// Candidate word: str[pos..j] inclusive.
string w = str.substr(pos, j - pos + 1);
if(dict.find(w) != dict.end()){
// Remove the word for the recursive call (each word used at most once)...
dict.erase(w);
int status = strPermuteFrom(dict, str, j+1);
// ...and restore it so sibling branches can still use it.
dict.insert(w);
if(status){
// Deeper calls have already inserted their spaces to the right of
// pos; inserting here shifts them right without reordering them.
if(pos > 0) str.insert(pos, " ");
return status;
}
}
}
return false;
}
// Entry point: true iff str can be fully segmented into distinct dictionary
// words; on success str is rewritten in place with spaces between the words.
bool strPermute(unordered_set<string> &dict, string &str)
{
    return strPermuteFrom(dict, str, /*pos=*/0);
}
result:
dictionary = { acting, good, bad, actor, }
badactorgoodact : notconstructible
badactorgoodacting : constructible (bad actor good acting)
badactorbadacting : notconstructible
I think there should be another constraint that
only "sub-sequences" (i.e., consecutive in the list) summed to zero can be removed.
If there is no such limitation, I believe this is an NP-hard problem
(correct me if I am wrong).
In this case we can enumerate all the sub-sequence by using
two pointers, namely, start and end; i.e., O(n^2) algorithm.
(assumed we use a head node of the same type LNode)
// Singly linked list node; 'next' is a shared_ptr so spliced-out sublists are
// freed automatically once no longer referenced.
struct LNode
{
    int data;
    shared_ptr<LNode> next;
    LNode(int d) : data(d) {}
};
// Repeatedly remove consecutive runs of nodes whose values sum to zero.
// 'head' is assumed to be a dummy head node (same LNode type) whose own
// value never participates in any run.
void cancelOut(shared_ptr<LNode> &head)
{
    shared_ptr<LNode> anchor = head;
    while (anchor) {
        int runningSum = 0;
        bool removed = false;
        // Scan the run starting just after 'anchor'.
        for (shared_ptr<LNode> probe = anchor->next; probe; probe = probe->next) {
            runningSum += probe->data;
            if (runningSum == 0) {
                // Splice out (anchor, probe] and rescan from the same anchor,
                // since the removal may enable a new zero-sum run here.
                anchor->next = probe->next;
                removed = true;
                break;
            }
        }
        if (!removed)
            anchor = anchor->next;
    }
}
Test results:
Test 0
original: {6, -6, 8, 4, -12, 9, 8, -8}
canceled out: {9}
Test 1
original: {4, 6, -10, 8, 9, 10, -19, 10, -18, 20, 25}
canceled out: {20, 25}
O(n) solution is provided.
We can divide strings into two types:
type A, which does not contain ‘b’,
and type B, which contains exactly one ‘b’.
We can define matrix A and B as follows.
A[i]: the number of strings of length i in A type.
B[i]: the number of strings of length i in B type.
And the answer is A[n] + B[n]
Because a type-B string of length i can be generated
by picking any type-A string of length i-1
and inserting ‘b’ at any position in it.
There are i possible insertion positions for ‘b’,
thus the following equation relates A[i] and B[i].
B[i] = i * A[i-1]
So, it is enough to compute A[i].
For considering strings in A, there are three possible
prefixes which end with ‘a’.
(because there is no constraint in the substring after ‘a’)
1. ‘a’ + A type strings with length i - 1
2. ‘ca’ + A type strings with length i - 2
3. ‘cca’ + A type strings with length i - 3
i.e., A[i] = A[i-1] + A[i-2] + A[i-3]
where A[1] = 2, A[2] = 4, A[3] = 7
We can compute matrix A iteratively.
As an example, the number of strings of length 3 is
A[3] + B[3] = A[3] + 3 * A[2] = 7 + 3*4 = 19.
// Count valid strings of length n. A[i] = number of valid strings of length i
// containing no 'b' (A[i] = A[i-1] + A[i-2] + A[i-3], seeds 1,2,4,7); a string
// with one 'b' is any A-string of length n-1 with 'b' inserted at one of n
// positions, so the answer is A[n] + n * A[n-1].
int countStr(int n)
{
    if (n < 0)
        return 0;    // no strings of negative length
    if (n == 0)
        return 1;    // only the empty string; also guards the A[n-1] read below
    // Original used reserve() + operator[], which indexes unconstructed
    // elements (UB: size stays 0). Allocate at least 4 slots so the seed
    // values always fit even for n < 3.
    vector<int> A(max(n + 1, 4));
    A[0] = 1, A[1] = 2, A[2] = 4, A[3] = 7;
    for (int i = 4; i <= n; i++) {
        // A valid A-string starts with 'a', 'ca' or 'cca'.
        A[i] = A[i-1] + A[i-2] + A[i-3];
    }
    return A[n] + n * A[n-1];
}
O(n) solution is provided.
This solution uses the O(log(n)) space in the stack,
and I am not sure whether this will violate the requirements.
The key concept is that, while visiting each node,
we separate the node from heap and add it to BST
with maintaining the structure.
In doing so, we keep following invariant:
the current node is always the leftmost node of the heap
or the top element.
For this, we might need to modify the left pointer of
the parent IN HEAP temporarily and recover it appropriately.
The procedures are:
1. visit left child recursively
2. swap top elements with the cur node
3. detach cur node from heap and add it to BST
4. visit right child recursively
For step 3 separation, there are two cases.
(1) case 1. node is not the top element
: it is guaranteed that we are left children of
the parent IN HEAP.
so change the left pointer of the parent to the right child.
(2) case 2. node is the top element
: make the right child as a top element of the heap
Following is c++ implementation.
// Convert the min-heap rooted at *heapRef into a BST in place, reusing the
// node shape of the tree 'cur' belongs to: an in-order walk over that shape
// assigns, at each visit, the heap's current minimum to the visited node.
// cur: node being rebuilt; parent: its parent IN THE HEAP (nullptr when cur
// is the heap top); heapRef: root pointer of the shrinking heap.
// Returns cur so the caller can restore a left pointer that step 3 may have
// redirected while detaching.
// NOTE(review): relies on an external heapify() to restore the heap after
// the data swap — not shown here; confirm it re-sifts from the root.
Node *heapToBST(Node *cur, Node *parent, Node **heapRef)
{
// 1. visit left child
if(cur->left){
// left pointer of myself can be modified
// for maintaining heap in case 1 of step 3.
// so it should be recovered from return value
cur->left = heapToBST(cur->left, cur, heapRef);
}
// 2. swap top elements with cur node
swap(cur->data, (*heapRef)->data);
// 3. detach cur node from heap
if(parent){
// case1. we are not the top element
// cur == parent->left by the invariant
parent->left = cur->right;
// heapify the heap again
heapify(*heapRef);
}
else {
// cur node is top element i.e., cur == head
*heapRef = (*heapRef)->right;
}
// 4. visit right child
if(cur->right){
heapToBST(cur->right, parent, heapRef);
}
return cur;
}
This is the test result
[Mean Heap]
0
1 2
3 4 5
6 7 8
9
[BST]
3
2 8
0 5 9
1 4 6
7
#include <iostream>
#include <thread>
#include <mutex>
#include <condition_variable>
// Shared state for the two alternating printer threads.
int num = 0;
std::mutex mtx;
std::condition_variable cv;
// Prints every number of the requested parity in [0, 100), alternating with
// the peer thread (even == true prints even numbers). Returns 0 on exit.
// Fix: the original did not re-check the bound after waking from wait(), so
// the even thread deterministically printed 100 as well — 101 numbers for a
// 'num < 100' loop guard. The predicate wait below also wakes on termination.
int printNumber(bool even)
{
    std::unique_lock<std::mutex> lck(mtx);
    while (num < 100) {
        // Wait until it is our turn, or until the peer finished the sequence.
        cv.wait(lck, [even] { return num >= 100 || ((num & 1) == 0) == even; });
        if (num >= 100)
            break;   // the peer printed the last number while we were waiting
        printf("%d\n", num++);
        cv.notify_all();
    }
    // Ensure a still-waiting peer observes termination.
    cv.notify_all();
    return 0;
}
int main()
{
    // One thread prints the odd numbers (even == false), the other the evens.
    std::thread workers[2] = {
        std::thread(printNumber, false),
        std::thread(printNumber, true),
    };
    for (auto &w : workers)
        w.join();
    return 0;
}
We can do this simply by augmenting a tree node with the total number of nodes in the sub-tree rooted by the given node.
Pseudo code for doing this is:
int Tree::countRange(int start, int end)
{
// step1. find common root node of start and end
Node *nroot = findCommonRoot(start, end);
if(!nroot)
return 0;
// step2. get the number of nodes rooted by nroot
int count = nroot->getNodeCnt();
// step3. subtract the nodes whose value is less than range start
count -= countSmallerNode(nroot, start);
// step4. subtract the nodes whose value is larger than range end
count -= countLargerNode(nroot, end);
return count;
}
The auxiliary method implementations are:
// Descend from the root while the whole query range [start, end] lies
// strictly on one side; stop at the first node whose value is inside the
// range (the "split node"). Returns nullptr if the search runs off the tree.
Node *Tree::findCommonRoot(int start, int end)
{
    Node *cur = dynamic_cast<Node*>(root);
    while (cur != nullptr) {
        const int v = cur->getValue();
        if (end < v)
            cur = cur->getLeft();
        else if (v < start)
            cur = cur->getRight();
        else
            break;   // start <= v <= end: this subtree straddles the range
    }
    return cur;
}
// Count the nodes under nroot whose value is strictly less than start,
// walking one root-to-leaf path and summing whole left subtrees.
int Tree::countSmallerNode(Node *nroot, int start)
{
    int total = 0;
    for (Node *walk = nroot; walk != nullptr; ) {
        const int v = walk->getValue();
        if (start < v) {
            // everything smaller must be on the left
            walk = walk->getLeft();
            continue;
        }
        // start >= v: the whole left subtree is smaller than start
        if (Node *lft = walk->getLeft())
            total += lft->getNodeCnt();
        if (start == v)
            break;        // v itself is not counted (strictly smaller only)
        ++total;          // v < start: this node is smaller too
        walk = walk->getRight();
    }
    return total;
}
// Mirror of countSmallerNode: count the nodes under nroot whose value is
// strictly greater than end.
int Tree::countLargerNode(Node *nroot, int end)
{
    int total = 0;
    for (Node *walk = nroot; walk != nullptr; ) {
        const int v = walk->getValue();
        if (end > v) {
            // everything larger must be on the right
            walk = walk->getRight();
            continue;
        }
        // end <= v: the whole right subtree is larger than end
        if (Node *rgt = walk->getRight())
            total += rgt->getNodeCnt();
        if (end == v)
            break;        // v itself is not counted (strictly larger only)
        ++total;          // v > end: this node is larger too
        walk = walk->getLeft();
    }
    return total;
}
Those are test results
[TREE]
8
-12 14
-13 3 11
-15 -9 7
-10 -4 4
6
nodes in [-11, 5]: 5
nodes in [-16, -4]: 6
nodes in [-8, 1]: 1
nodes in [-9, 0]: 2
nodes in [6, 7]: 2
nodes in [3, 5]: 2
nodes in [-8, 12]: 7
nodes in [-3, 8]: 5
nodes in [-4, 8]: 6
nodes in [4, 2]: 0
nodes in [-15, 9]: 11
nodes in [-18, -6]: 5
nodes in [-2, 9]: 5
nodes in [-9, -5]: 1
nodes in [7, 9]: 2
nodes in [2, 12]: 6
nodes in [6, 13]: 4
nodes in [-9, -12]: 0
nodes in [9, 14]: 2
nodes in [-15, -8]: 5
My complete code is as follows.
#include <iostream>
#include <vector>
#include <algorithm>
#include <cassert>
using namespace std;
// Print iset to stdout as "{a, b, c}"; an empty set prints as "NULL".
void printSet(vector<int>& iset){
    if (iset.empty()) {
        printf("NULL");
        return;
    }
    printf("{");
    const char *sep = "";
    for (size_t j = 0; j < iset.size(); j++) {
        printf("%s%d", sep, iset[j]);
        sep = ", ";
    }
    printf("}");
}
// Extend 'result' with every subset obtained by appending 1..numRepeat copies
// of pset[newIdx] to each subset already present, where numRepeat is the
// length of the run of equal values starting at newIdx (pset must be sorted
// so duplicates are adjacent). Returns the index just past that run.
int genPowerSetIdx(vector<int> &pset, int newIdx, vector<vector<int>> &result)
{
    assert(newIdx < pset.size());
    const int value = pset[newIdx];
    // find the end of the run of duplicates of 'value'
    int runEnd = newIdx + 1;
    while (runEnd < (int)pset.size() && pset[runEnd] == value)
        runEnd++;
    const int numRepeat = runEnd - newIdx;
    // only extend the subsets that existed before this call
    const int baseCount = result.size();
    for (int i = 0; i < baseCount; i++) {
        vector<int> extended = result[i];   // copy, then grow it step by step
        for (int j = 0; j < numRepeat; j++) {
            extended.push_back(value);
            result.push_back(extended);
        }
    }
    return runEnd;
}
// Build the power set of the multiset 'pset' into 'result' (no duplicate
// subsets even when pset has repeated values). The empty set is included.
// Side effect: pset is sorted to group duplicates for run detection.
void genPowerSet(vector<int> &pset, vector<vector<int>> &result)
{
    sort(pset.begin(), pset.end());
    result.push_back(vector<int>{});   // seed with the empty subset
    int idx = 0;
    while (idx < (int)pset.size())
        idx = genPowerSetIdx(pset, idx, result);
}
// Demo driver: prints the power set of {1, 2, 2}.
int main()
{
    vector<int> pset{1, 2, 2};
    vector<vector<int>> result;
    genPowerSet(pset, result);
    printf("{");
    for (size_t i = 0; i < result.size(); i++) {
        printSet(result[i]);
        if (i + 1 < result.size())
            printf(", ");
    }
    printf("}");
}
result:
{NULL, {1}, {2}, {2, 2}, {1, 2}, {1, 2, 2}}
Provided an iterative solution with O(1) space complexity.
Assumed the tree is mutable.
The idea is to do the in-order traversal with O(1) space complexity
using the Morris traversal technique.
Note that the LCA node is the node at the minimum depth
among nodes visited after finding the first one.
- linuxchain October 17, 2016