zortlord
BAN USERThough much more readable than mine, String.split(string) operates in O(n) time already. So, effectively, your runtime is O(n+m) complexity where n is the number of characters and m is the number of tokens produced by the split. It is not as fast as simply reading the original String 1 time (character by character) and handling each token at that time ( O(n) complexity).
- zortlord July 17, 2015Though much more readable, String.split(string) operates in O(n) time already. So, effectively, your runtime is O(n+m) complexity where n is the number of characters and m is the number of tokens produced by the split. It is not as fast as reading the original String 1 time and handling each token at that time ( O(n) complexity).
- zortlord July 17, 2015To support any number of ".."s, a queue or list of valid path components will be required. This can be roughly completed in O(n) complexity and O(n) memory where n is the number of characters in the path:
/**
 * Collapses "." and ".." components of a file path and returns the
 * normalized, separator-prefixed result (e.g. "/a/b/../c" -> "/a/c").
 * O(n) time and O(n) memory in the number of characters.
 *
 * @param path the path to normalize; must not be null
 * @return the normalized path rooted at File.separatorChar
 * @throws NullPointerException if path is null
 * @throws IllegalArgumentException if a ".." would climb above the root
 */
public static String normalizePath(String path){
    if(path == null){
        throw new NullPointerException("path");
    }
    ArrayList<String> pathTokens = new ArrayList<String>();
    //build the set of valid path tokens
    StringBuilder tokenBuilder = new StringBuilder();
    for(int i = 0; i < path.length(); i++){
        char c = path.charAt(i);
        //a separator ends the current token
        //(fixed: File.PATH_SEPARATOR does not exist; separatorChar is the path-name separator)
        if(c == File.separatorChar){
            processToken(tokenBuilder.toString(), pathTokens);
            tokenBuilder.setLength(0);
        }
        else{
            tokenBuilder.append(c);
        }
    }
    //handle the trailing token (paths usually do not end with a separator)
    //(fixed: the original duplicated this logic and misspelled "token" as "toke")
    processToken(tokenBuilder.toString(), pathTokens);
    //rebuild the valid output
    tokenBuilder.setLength(0);
    for(String token : pathTokens){
        tokenBuilder.append(File.separatorChar);
        tokenBuilder.append(token);
    }
    return tokenBuilder.toString();
}

//Applies one path component: "" and "." are dropped, ".." pops the last kept
//component (throwing if there is nothing left to pop).
private static void processToken(String token, ArrayList<String> pathTokens){
    if(token.length() == 0 || ".".equals(token)){
        return;
    }
    if("..".equals(token)){
        if(pathTokens.isEmpty()){
            throw new IllegalArgumentException("invalid path");
        }
        //ArrayList has no removeLast() before Java 21
        pathTokens.remove(pathTokens.size() - 1);
    }
    else{
        pathTokens.add(token);
    }
}
O(n) complexity and O(log n) memory assuming the tree's balanced:
//A binary tree node; after flattening, only 'right' links are used.
static class Node{
    int value;
    Node left, right;
}
/**
 * Flattens a binary tree into a singly linked list in in-order sequence,
 * relinking through the 'right' pointers and clearing 'left' pointers.
 * O(n) time; O(log n) stack if the tree is balanced.
 *
 * @return the head (leftmost node) of the flattened list
 * @throws NullPointerException if root is null
 */
public static Node flattenTree(Node root){
    if(root == null){
        throw new NullPointerException();
    }
    Node[] flatten = flatten(root);
    return flatten[0];   //fixed: the original returned undeclared "node[0]"
}
//Recursively flattens the subtree; returns {head, tail} of the resulting list.
private static Node[] flatten(Node node){
    if(node.left != null){
        Node[] leftResult = flatten(node.left);
        node.left = null;
        if(node.right != null){
            //left-list ... node ... right-list
            Node[] rightResult = flatten(node.right);
            leftResult[1].right = node;
            node.right = rightResult[0];
            leftResult[1] = rightResult[1];
            return leftResult;
        }
        else{
            //left-list ... node; node becomes the new tail
            leftResult[1].right = node;
            leftResult[1] = node;
            return leftResult;
        }
    }
    else if(node.right != null){
        //node ... right-list; node becomes the new head
        Node[] rightResult = flatten(node.right);
        node.right = rightResult[0];
        rightResult[0] = node;
        return rightResult;
    }
    //leaf: it is both head and tail
    return new Node[]{node, node};
}
This can be done with a BFS traversal of the tree. O(n) complexity and O(n) memory:
private static class Node{
int value;
Node left, right;
}
private static void zigZagTree(Node root){
if(root == null){
throw new NullPointerException();
}
Node node;
boolean isZig;
LinkedList<Node> unprocessedNodes = new LinkedList<Node>();
LinkedList<Node> nextNodes = new LinkedList<Node>();
LinkedList<Node> temp;
StingBuilder builder = new StringBuilder();
unprocessedNodes.add(root);
while(!unprocessedNodes.isEmpty()){
if(isZig){
while(!unprocessedNodes.isEmpty()){
node = unprocessedNodes.removeFirst();
builder.append(Integer.toString(node.value);
builder.append(", ");
if(node.left != null){
nextNodes.add(node.left);
}
if(node.right != null){
nextNodes.add(node.right);
}
}
}
else{
while(!unprocessedNodes.isEmpty()){
node = unprocessedNodes.removeLast();
builder.append(Integer.toString(node.value);
builder.append(", ");
if(node.right != null){
nextNodes.addFirst(node.right);
}
if(node.left != null){
nextNodes.addFirst(node.left);
}
}
}
isZig = !isZag;
temp = unprocessedNodes;
unprocessedNodes = nextNodes;
nextNodes = temp;
}
java.lang.System.out.println(builder.toString());
}
Then use DP to compute the solution in O(nm) complexity with O(nm) memory:
/**
 * Counts the paths from the top-left to the bottom-right of a 0/1 grid,
 * moving right, down, or diagonally down-right, where 1 marks a passable
 * cell. Dynamic programming: O(nm) time and O(nm) memory.
 *
 * @throws NullPointerException if m is null
 * @throws IllegalArgumentException if the grid has no rows or no columns
 */
public static int countPaths(int[][] m){
    if(m == null){
        throw new NullPointerException();
    }
    //validate before reading m[0]: the original indexed m[0] first and could
    //throw ArrayIndexOutOfBoundsException on an empty grid
    if(m.length == 0 || m[0].length == 0){
        throw new IllegalArgumentException();
    }
    int xDim = m.length;
    int yDim = m[0].length;
    int[][] track = new int[xDim][yDim];
    //base case: the goal cell counts as one path if it is passable
    track[xDim-1][yDim-1] = m[xDim-1][yDim-1];
    //base cases along the last column: only "down" moves remain
    for(int x = xDim-2; x > -1; x--){
        track[x][yDim-1] = (m[x][yDim-1] == 1) ? track[x+1][yDim-1] : 0;
    }
    //base cases along the last row: only "right" moves remain
    for(int y = yDim-2; y > -1; y--){
        track[xDim-1][y] = (m[xDim-1][y] == 1) ? track[xDim-1][y+1] : 0;
    }
    //each remaining cell sums the three reachable neighbors
    for(int x = xDim-2; x > -1; x--){
        for(int y = yDim-2; y > -1; y--){
            track[x][y] = (m[x][y] == 1)
                    ? track[x+1][y] + track[x][y+1] + track[x+1][y+1]
                    : 0;
        }
    }
    return track[0][0];
}
Operates with runtime complexity of O(k) and memory O(1) where k is the position in the merged array:
/**
 * Returns the element at index k (0-based) of the array that would result
 * from merging the two sorted arrays, without materializing the merge.
 * O(k) time, O(1) memory.
 *
 * @throws NullPointerException if either array is null
 * @throws IllegalArgumentException if k is outside the merged array
 */
public static int getMergedValue(int[] arr1, int[] arr2, int k){
    //check for illegal inputs (fixed: negative k was not rejected)
    if(arr1 == null || arr2 == null){
        throw new NullPointerException();
    }
    if(k < 0 || k >= arr1.length + arr2.length){
        throw new IllegalArgumentException();
    }
    //'merge' the arrays by advancing whichever pointer holds the smaller head
    int pos = 0;
    int arr1Pos = 0;
    int arr2Pos = 0;
    while(pos < k && arr1Pos < arr1.length && arr2Pos < arr2.length){
        if(arr1[arr1Pos] < arr2[arr2Pos]){
            arr1Pos++;
        }
        else {
            arr2Pos++;
        }
        pos++;
    }
    //if one array is exhausted, the answer is in the remainder of the other
    //(fixed: the original compared arr1[arr1Pos] even when arr1Pos had run
    //off the end, e.g. arr1={1,2,3}, arr2={10}, k=3)
    if(arr1Pos >= arr1.length){
        return arr2[arr2Pos + (k - pos)];
    }
    if(arr2Pos >= arr2.length){
        return arr1[arr1Pos + (k - pos)];
    }
    //otherwise return the smaller of the two current heads
    if(arr1[arr1Pos] < arr2[arr2Pos]){
        return arr1[arr1Pos];
    }
    return arr2[arr2Pos];
}
There are a couple approaches to this that generally depend on the expected length of the Strings being checked:
If the Strings are short, then sorting the actual content of the strings and comparing should be relatively easy and quick. However, this will cost O(m log m) time and O(m) memory where m is the length of the Strings.
Other methods include creating an anagrammatic signature of the String for comparison. Assuming you're using a character set like ASCII, then this approach would run with complexity O(m + s) costing memory O(s) where s is the size of the character set. This can be done by simply creating an array of ints the size of the character set and counting the frequency of the characters. To compare the two strings, simply compare the character frequency
Using ASCII, the approximate point at which I would definitely go with the anagrammatic signature would be where the Strings are of about size 32 or greater. Now, this would change dramatically if I were doing something like comparing genetic sequences. Those would have a character set of 4 (maybe 5 if counting RNA) and the Strings would typically be long.
How about the entire tournament being represented by 64 bits where each bit corresponds to which team won a particular game (0 means the first team won and 1 means the second team won):
First round- 32 bits
Second round- 16 bits
Third round- 8 bits
Fourth round- 4 bits
Fifth round- 2 bits
Sixth round- 1 bit
Scoring would be simple- XOR the user prediction with reality and count the resulting 0s.
What is an Area? Assuming that area is something like this:
class Area{
int i, j;
@Override
public String toString(){...}
}
then the code will operate just fine. It should execute and output the following
[Area.toString() with i = 203, and j = 115]
end of function2()
203
end main
Given that you have ints i and j floating around in the OdemoA class which is unrelated to Area, that could be a problem. There's some sort of ambiguous relationship between OdemoA and Area that just isn't explained.
- zortlord July 14, 2015Let's actually format the code and see if it makes more sense:
//Code under discussion, reformatted from the original question.
//NOTE(review): depends on an external class Area with int fields i and j -
//its exact definition is not shown here; verify against the question.
class OdemoA
{
int i,j;
//prints the int parameter (which shadows the field i), then a marker line
void function1(int i) {
System.out.println(i);
System.out.println("Inside function()");
}
//mutates the passed Area's fields (when non-null), then prints the Area
//via its toString() and a marker line
void function2(Area a1) {
if(a1!=null) {
a1.i=203;
a1.j=115;
}
System.out.println(a1);
System.out.println("end of function2()");
}
public static void main(String args[]) {
OdemoA d1 = new OdemoA();
Area a2 = new Area();
d1.function2(a2);
//a2 was mutated inside function2, so this prints 203
System.out.println(a2.i);
System.out.println("end main");
}
}
O( log n) complexity (assuming balanced), O(1) memory:
public static class Node{
int val;
Node left, right;
}
public static Node findNode(int val, Node node){
while(node != null){
if (val < node.val){
node = node.left;
}
else if(val > node.val{
node = node.right;
}
else{
return node;
}
}
return null;
}
And now with formatting...
Since all directions are possible and you have exclusionary positions, a search approach would be necessary. Backtracking is probably easiest.
public static List<List<Move>> getValidPaths(int[][] m){
if(m == null){
throw new NullPointerException();
}
if(m.length == 0 || m[0].length == 0){
throw new IllegalArgumentException();
}
Worker worker = new Worker(m);
worker.execute();
return worker.results();
}
public static class Move{
int x, y;
}
public static class Worker{
int[][] m;
boolean[][] visited;
ArrayList<Move> currentPos;
ArrayList<ArrayList<Move>> results;
private Worker(int[][] m){
this.m = m;
this.visited = new boolean[this.m.length][this.m[0].length];
this.currentPos = new ArrayList<Move>();
this.results = new ArrayList<ArrayList<Move>>();
}
private void execute(){
executeRecur(0, 0);
}
private void executeRecur(int x, int y){
//disregard invalid positions, illegal moves, and places already visited
if(x < 0 || x > this.m.length || y < 0 || y > this.m[0].length || this.m[x][y] == 0 || this.visited[x][y]){
return;
}
//set visited and add to path
this.visited[x][y] = true;
Move move = new Move(x, y);
this.currentPos.add(move);
//if this is a solution, record it
if(x == this.m.length && y == this.m[0].length){
this.results.add(this.currentPos.clone());
}
//otherwise keep searching
else{
for(int newX = x -1; newX <= x + 1; newX ++){
for(int newY = y - 1; new Y <= y + 1; newY++){
executeRecur(newX, newY);
}
}
}
//remove this position from the path
this.currentPos.remove(currentPos.size() -1);
this.visited[x][y] = false;
}
private ArrayList<ArrayList<Move>> results(){
return this.results;
}
}
Since all directions are possible and you have exclusionary positions, a search approach would be necessary. Backtracking is probably easiest.
public static List<List<Move>> getValidPaths(int[][] m){
if(m == null){
throw new NullPointerException();
}
if(m.length == 0 || m[0].length == 0){
throw new IllegalArgumentException();
}
Worker worker = new Worker(m);
worker.execute();
return worker.results();
}
public static class Move{
int x, y;
}
public static class Worker{
int[][] m;
boolean[][] visited;
ArrayList<Move> currentPos;
ArrayList<ArrayList<Move>> results;
private Worker(int[][] m){
this.m = m;
this.visited = new boolean[this.m.length][this.m[0].length];
this.currentPos = new ArrayList<Move>();
this.results = new ArrayList<ArrayList<Move>>();
}
private void execute(){
executeRecur(0, 0);
}
private void executeRecur(int x, int y){
//disregard invalid positions, illegal moves, and places already visited
if(x < 0 || x > this.m.length || y < 0 || y > this.m[0].length || this.m[x][y] == 0 || this.visited[x][y]){
return;
}
//set visited and add to path
this.visited[x][y] = true;
Move move = new Move(x, y);
this.currentPos.add(move);
//if this is a solution, record it
if(x == this.m.length && y == this.m[0].length){
this.results.add(this.currentPos.clone());
}
//otherwise keep searching
else{
for(int newX = x -1; newX <= x + 1; newX ++){
for(int newY = y - 1; new Y <= y + 1; newY++){
executeRecur(newX, newY);
}
}
}
//remove this position from the path
this.currentPos.remove(currentPos.size() -1);
this.visited[x][y] = false;
}
private ArrayList<ArrayList<Move>> results(){
return this.results;
}
}
I forget the specific name of this approach (EDIT: it is a topological sort), but a DFS search on all contents, if converted to a graph, would work. O(n) runtime and O(n) memory for the searching of the graph.
Assumes that the incoming parameter are a List<List<String>> where the first item on each list is the name of the library and each following item is a dependency (something that should be loaded first)
//A library and the libraries that must be loaded before it.
private static class Vertex{
    String name;
    ArrayList<Vertex> dependencies;
    private Vertex(String name){
        this.name = name;
        this.dependencies = new ArrayList<Vertex>();
    }
}
//Lookup-or-create store of vertices, preserving first-seen order.
private static class Graph{
    ArrayList<Vertex> vertices;
    HashMap<String, Vertex> vertexLookup;
    private Graph(){
        this.vertices = new ArrayList<Vertex>();
        this.vertexLookup = new HashMap<String, Vertex>();
    }
    private Vertex getVertex(String libName){
        Vertex v = this.vertexLookup.get(libName);
        if(v == null){
            v = new Vertex(libName);   //fixed: there is no no-arg Vertex constructor
            this.vertexLookup.put(libName, v);
            this.vertices.add(v);
        }
        return v;
    }
}
/**
 * Topologically sorts the libraries so each appears after everything it
 * depends on. Each inner list is [libName, dep1, dep2, ...]. O(n) in the
 * size of the dependency graph.
 *
 * @return the load order, or null when the dependencies contain a cycle
 */
public static List<String> getDependencyLoadOrder(List<List<String>> libraries){
    //build the graph
    Graph g = new Graph();
    for(List<String> dependency : libraries){
        Iterator<String> iter = dependency.iterator();
        String libName = iter.next();
        Vertex v = g.getVertex(libName);
        while(iter.hasNext()){   //fixed: hasNext/next were referenced without ()
            libName = iter.next();
            Vertex child = g.getVertex(libName);
            v.dependencies.add(child);
        }
    }
    //construct the list for return; tryLoad signals a cycle by throwing
    ArrayList<String> results = new ArrayList<String>(g.vertices.size());
    HashMap<String, State> loadState = new HashMap<String, State>();
    try{
        for(int i = 0; i < g.vertices.size(); i++){
            tryLoad(g.vertices.get(i), results, loadState);
        }
    }
    catch(IllegalArgumentException e){
        //a cycle was found: no valid order exists
        return null;
    }
    return results;
}
//load progress per library; an absent map entry means "not started yet"
enum State{
    LOADED, LOADING, NOT_LOADED
}
//Post-order DFS: load all dependencies, then the library itself.
//Encountering a LOADING library again means we walked a cycle.
private static void tryLoad(Vertex v, ArrayList<String> results, HashMap<String, State> loadState){
    State state = loadState.get(v.name);
    if(state == State.LOADING){   //fixed: bare LOADING/LOADED did not resolve
        throw new IllegalArgumentException();
    }
    if(state == State.LOADED){
        return;
    }
    loadState.put(v.name, State.LOADING);
    for(Vertex child : v.dependencies){
        tryLoad(child, results, loadState);
    }
    results.add(v.name);
    loadState.put(v.name, State.LOADED);
}
This is one of those things where it cannot really be simplified. O(n^2) complexity and O(1) memory:
/**
 * Returns the largest "surpasser count" in the array: for each index i, the
 * number of later elements strictly greater than arr[i]; the maximum such
 * count is returned. O(n^2) time, O(1) memory.
 *
 * @throws NullPointerException if arr is null
 */
public static int surpasser(int[] arr){
    if(arr == null){
        throw new NullPointerException();
    }
    int best = 0;
    for(int i = 0; i < arr.length; i++){
        int count = 0;
        //scan the suffix for values that surpass arr[i]
        for(int j = arr.length - 1; j > i; j--){
            if(arr[j] > arr[i]){
                count++;
            }
        }
        best = Math.max(best, count);
    }
    return best;
}
Roughly O(nm) complexity and O(nm) memory where n is the number of Strings and m is the length of the Strings.
public static int countAnagramSets(String[] strArr){
int count = 0;
HashSet<String> anagramSignatures = new HashSet<String>();
for(String str : strArr){
String signature = getSignature(str);
if(!anagramSignatures.contains(signature)){
count++;
anagramSignatures.add(signature);
}
}
return count;
}
private static String getSignature(String str){
//assuming default character set
//count each instance of a character in the string
int[] charCount = new int[26];
for(char c : str.toCharArray()){
charCount[(int)c - (int)'a']++;
}
//build a suitable signature
StringBuilder builder = new StringBuilder();
for(int i = 0; i < charCount.length; i++){
if(charCount[i] > 0){
builder.append((char)(i + (int)'a'));
builder.append(Integer.toString(charCount[i]);
}
}
return builder.toString();
}
Use a variation of the binary search to find the index i where arr[i-1] > arr[i] and then return i. Complexity O(log n), memory O(1).
public static int findRotation(int[] arr){
if(arr == null){
throw new NullPointerException():
}
if(arr.length < 2){
return 0;
}
if(arr.length == 2){
if(arr[0] < arr[1]){
return 0;
}
return 1;
}
int lo = 0;
int hi = arr.length -1;
int mid;
while(lo + 1 < hi){
mid = (lo >> 1) + (hi >> 1);
//if in the first half
if(arr[mid] < arr[lo]){
hi = mid;
}
//else in the second half
else if(arr[mid] >= arr[hi]){
lo mid;
}
//otherwise cannot tell
else{
return 0;
}
}
return hi;
}
//testing using JUnit (requires the JUnit dependency to compile)
@Test(expected=NullPointerException.class)
public void test_null(){
    findRotation(null);
}
@Test
public void test_empty(){
    assertTrue(findRotation(new int[0]) == 0);
}
@Test
public void test_indeterminate(){
    assertTrue(findRotation(new int[]{1}) == 0);
    assertTrue(findRotation(new int[]{1, 1}) == 0);
    assertTrue(findRotation(new int[]{1, 1, 1}) == 0);
    assertTrue(findRotation(new int[]{1, 2, 3, 4}) == 0);
    assertTrue(findRotation(new int[]{7, 7, 7, 7, 7, 7, 7}) == 0);
}
@Test
public void test(){
    assertTrue(findRotation(new int[]{2, 1}) == 1);
    //fixed expectations: the contract returns i where arr[i-1] > arr[i], so
    //{2,3,1} descends at index 2 and {3,1,2} descends at index 1
    assertTrue(findRotation(new int[]{2, 3, 1}) == 2);
    assertTrue(findRotation(new int[]{3, 1, 2}) == 1);
}
@Test
public void lengthTest(){
    int[] arr = new int[]{1, 1, 2, 2, 3, 3, 3, 4, 5, 7, 7};
    for(int i = 0; i < arr.length; i++){
        int[] rotArr = rotate(arr, i);   //fixed: rotate was called without the rotation amount
        //NOTE(review): this array contains duplicates, for which findRotation
        //may legitimately return 0; this expectation looks unreliable - verify
        assertTrue(findRotation(rotArr)==i);
    }
}
//left-rotates arr by rot positions into a new array
private int[] rotate(int[] arr, int rot){
    int[] rotArr = new int[arr.length];
    int position = 0;
    for(;rot + position < arr.length; position++){
        rotArr[position] = arr[rot + position];
    }
    for(int i = 0; i < rot; i++){
        rotArr[position] = arr[i];
        position++;
    }
    return rotArr;
}
The first step to any interviewing question is to ask about the size of the data. At that point, you, as an interviewer, would know if this is a big data issue or not. Since there is no dialog in these questions and the question doesn't volunteer that information, it's reasonable to go with the strictly graphing approach.
Alternatively, you could demonstrate your supposed prowess and code up a complete and bug-free distributed graphing algorithm in 30-40 minutes like the average length of a technical interview.
Consider this a graph and complete a BFS. At each vertex/node, track whether the position has already been visited. Complexity O(nm), memory O(nm) where n is the number of vertices and m is the average number of 'friend' connections.
static class Person{
String name;
String Person[] friends;
String visited = false;
public Person(String name){
this.name = name;
}
public void setFriends(Person[] friends){
this.friends = friends;
}
public void setVisited(boolean visited){
this.visited = visited;
}
public String getName(){
return this.name;
}
public Person[] getFriends(){
return this.friends;
}
public boolean isVisited(){
return this.visited;
}
public int hashCode(){
return this.name.hashCode();
}
public boolean equals(Object o){
if(o instanceof Person){
return this.name.equals(o.name);
}
return false;
}
}
public static void printFriendLevels(File file, String personName){
HashMap<String, Person> personMap = new HashMap<String, Person>();
//build up the Person graph from the file
BufferedReader in = null;
try{
in = new BufferedReader(new FileReader(file));
String line = null;
while( (line = in.readLine) != null){
String name = line.substring(0, line.indexof(':'));
Person person = personMap.get(name);
if(person == null){
person = new Person(name);
personMap.put(name, person);
}
ArrayList<String> friendNames = getFriendNames(line);
Person[] friends = new Person[friendNames.size()];
for(int i = 0; i < friendNames.size(); i++){
String friendName = friendNames[i];
Person friend = personMap.get(friendName);
if(friend == null){
friend = new Person(friendName);
personMap.put(friendName, friend);
}
friends[i] = friend;
}
person.setFriends(friends);
}
}
catch(IOException e){
e.printStackTrace();
java.lang.System.err.println("Failed");
}
finally{
if(in != null){
try{
in.close();
}
catch(IOException e){
//do nothing
}
}
}
//do BFS search on the contents
ArrayList<Person> currentLevel = new ArrayList<Person>();
ArrayList<Person> nextLevel = new ArrayList<Person>();
ArrayList<Person> temp;
{
Person person = personMap.get(personName);
if(person == null){
return null;
}
currentLevel.add(person);
}
final StringBuilder line = new StringBuilder();
line.append("Level ");
final String dash = " -";
int count = 1;
while(!currentLevel.isEmpty()){
line.setLength(6);
line.append(count++);
line.append(dash);
for(Person person : currentLevel){
line.append(person.getName());
line.append(',');
for(Person friend : person.getFriends()){
if(!friend.isVisited()){
nextLevel.add(friend);
friend.setVisited(true);
}
}
}
java.lang.System.out.println(line.toString());
currentLevel.clear();
temp = currentLevel;
currentLevel = nextLevel;
nextLevel = currentLevel;
}
}
private static ArrayList<String> getFriendNames(String str){
int start = str.indexof(":") + 2;
ArrayList<String> names = new ArrayList<String>();
int pos = start;
while(pos < str.length()){
if(str.charAt(pos) == ','){
names.add(str.subString(start, pos));
start = pos + 1;
}
pos++;
}
return names;
}
"Now, you do know the logic so it would be better to write the code yourself. that will help YOU understand it more clearly... and you will never forget it.. :)" [sic]
Imagine a single path from root to leaf as:
1 -> 2 -> 4 -> 8 [etc]
your algorithm must then check:
(), (1), (1, 2), (1, 2, 4), (1, 2, 4, 8), (2), (2, 4), (2, 4, 8), (4), (4, 8), (8)
But your incorrect logic will only test:
(), (1), (1, 2), (1, 2, 4), (1, 2, 4, 8)
Perhaps you should take your own advice and code the algorithm (and, gasp, test it) since your logic is wrong. I guarantee that you will remember it. Or at least remember to not talk down to people.
- zortlord June 23, 2015Runtime complexity of O(n^2) with memory O(n) (if this were a balanced binary tree it would be O(n Log n) and O(log n) respectively):
//A binary tree node.
static class Node{
    int value;
    Node left, right;
}
/**
 * Prints every downward path (starting at any node, following parent-to-child
 * links) whose values sum to the target, formatted as "(v1,v2,...)".
 * O(n^2) time for a degenerate tree, O(n log n) when balanced.
 *
 * @throws NullPointerException if root is null
 */
//use non-recursive DFS to start a path at every node of the tree
public static void findSumPaths(Node root, int sum){
    if(root == null){
        throw new NullPointerException();   //added: guard consistent with the sibling answers
    }
    Stack<Node> dfsPosition = new Stack<Node>();
    dfsPosition.push(root);
    while(!dfsPosition.isEmpty()){
        StringBuilder output = new StringBuilder();
        output.append('(');
        Node node = dfsPosition.pop();
        makeOutputFromPosition(node, 0, sum, output);
        if(node.right != null){
            dfsPosition.push(node.right);
        }
        if(node.left != null){
            dfsPosition.push(node.left);
        }
    }
}
//Recursive DFS from the given node; prints whenever the path from the
//starting node down to the current node sums to the target. The shared
//StringBuilder is restored before returning (backtracking).
public static void makeOutputFromPosition(Node node, int currentSum, int sum, StringBuilder output){
    currentSum += node.value;
    boolean addedComma = false;
    String intString = Integer.toString(node.value);
    if(output.length() > 1){
        output.append(',');
        addedComma = true;
    }
    output.append(intString);
    if(currentSum == sum){
        output.append(')');
        System.out.println(output.toString());
        output.setLength(output.length()-1);   //drop the ')' and keep extending the path
    }
    if(node.left != null){
        makeOutputFromPosition(node.left, currentSum, sum, output);
    }
    if(node.right != null){
        makeOutputFromPosition(node.right, currentSum, sum, output);
    }
    //backtrack: remove this node's text (and its comma) from the buffer
    output.setLength(output.length() - intString.length());
    if(addedComma){
        output.setLength(output.length() - 1);
    }
}
How about a non-searching approach. Here's one for traversing the list. Unfortunately it's still O(n^2) complexity, but now O(n) memory:
/**
 * Returns the sum of the longest strictly increasing subsequence of arr
 * (the first such subsequence found when several share the longest length).
 * DP over prefixes: O(n^2) time, O(n) memory. Returns 0 for an empty array.
 *
 * @throws NullPointerException if arr is null
 */
public static int longestIncreasingSubsequenceSum(int[] arr){
    if(arr == null){
        throw new NullPointerException("\"arr\" may not be null");
    }
    //lengths[i]/sums[i]: length and sum of the chosen increasing subsequence ending at i
    int[] lengths = new int[arr.length];
    int[] sums = new int[arr.length];
    int bestLength = 0;   //fixed: these locals were uninitialized (compile error)
    int bestSum = 0;
    for(int i = 0; i < arr.length; i++){
        for(int j = i-1; j > -1; j--){
            //extend the longest qualifying predecessor subsequence
            if(arr[j] < arr[i] && lengths[j] > lengths[i]){
                lengths[i] = lengths[j];
                sums[i] = sums[j];
            }
        }
        lengths[i]++;
        sums[i] += arr[i];
        if(lengths[i] > bestLength){   //fixed typo: "length[i]"
            bestLength = lengths[i];
            bestSum = sums[i];
        }
    }
    return bestSum;
}
How's about non-recursive... Still O(n) complexity but, unfortunately O(n) memory due to the N-ary-ness.
//An N-ary tree node. (fixed: the original mixed the spellings "NArayNode"
//and "NAryNode", which could not compile; standardized on the name the
//public method already used)
public static class NAryNode{
    NAryNode[] children;
    int value;
}
/**
 * Checks the "children sum" property on every internal node: each node's
 * value must equal the sum of its direct children's values. Leaves (null or
 * empty children) trivially satisfy the property. Non-recursive DFS:
 * O(n) time, O(n) memory.
 *
 * @throws NullPointerException if root is null
 */
public static boolean isSumPropertyMet(NAryNode root){
    if(root == null){
        throw new NullPointerException("\"root\" may not be null");
    }
    Stack<NAryNode> nodesToExplore = new Stack<NAryNode>();
    nodesToExplore.push(root);
    while(!nodesToExplore.isEmpty()){
        NAryNode node = nodesToExplore.pop();
        //fixed: leaves previously NPE'd (null children) or failed the
        //comparison against an empty child sum of 0
        if(node.children == null || node.children.length == 0){
            continue;
        }
        int childSum = 0;
        for(NAryNode child : node.children){
            nodesToExplore.push(child);
            childSum += child.value;
        }
        if(childSum != node.value){
            return false;
        }
    }
    return true;
}
How about transforming the problem into a directed graph (larger numbers with an array index greater and value greater would have a connection) and then searching the graph using a DFS for the longest path until a deadend? This approach would take O(n^2) complexity and O(n^2) memory to construct the graph and O(n) for the DFS tracking.
/**
 * Longest-increasing-subsequence sum via an explicit predecessor graph and
 * memoized DFS. O(n^2) time and memory for the graph, O(n) for the DFS
 * bookkeeping. Returns 0 for an empty array.
 *
 * @throws NullPointerException if arr is null
 */
public static int longestIncreasingSubsequenceSum(int[] arr){
    if(arr == null){
        throw new NullPointerException();
    }
    //construct the graph: edges i -> j for every j < i with arr[j] < arr[i]
    //(fixed: "new ArrayList<Integer>[n]" is illegal generic array creation;
    //a list of lists sidesteps that)
    ArrayList<ArrayList<Integer>> connections = new ArrayList<ArrayList<Integer>>(arr.length);
    for(int i = 0; i < arr.length; i++){
        ArrayList<Integer> edges = new ArrayList<Integer>();
        for(int j = 0; j < i; j++){
            if(arr[j] < arr[i]){
                edges.add(j);
            }
        }
        connections.add(edges);
    }
    //find the best sum
    int bestLength = -1;
    int bestSum = 0;   //returned unchanged when arr is empty
    int[] localBestLength = new int[arr.length];
    int[] localBestSum = new int[arr.length];
    for(int i = 0; i < arr.length; i++){
        dfs(i, arr, connections, localBestLength, localBestSum);
        if(localBestLength[i] > bestLength){
            bestLength = localBestLength[i];
            bestSum = localBestSum[i];
        }
    }
    return bestSum;
}
//Memoized DFS filling localBestLength/localBestSum for index i.
//localBestLength[i] == 0 marks "not yet computed".
private static void dfs(int i, int[] arr, ArrayList<ArrayList<Integer>> connections, int[] localBestLength, int[] localBestSum){
    int childBestSum = 0;
    int childBestLength = 0;
    for(Integer child : connections.get(i)){
        if(localBestLength[child] == 0){
            dfs(child, arr, connections, localBestLength, localBestSum);
        }
        if(localBestLength[child] > childBestLength){
            childBestLength = localBestLength[child];
            childBestSum = localBestSum[child];
        }
    }
    localBestLength[i] = childBestLength + 1;
    localBestSum[i] = childBestSum + arr[i];
}
Assuming that the interval collection already coming in contains no overlaps and is sorted, then this algorithm is very simple and can be done in O(n) complexity with O(1) memory. Disassembles the old LinkedList and creates a new one:
/**
 * Inserts newInterval into a sorted, non-overlapping list of [start, end]
 * intervals, merging where necessary. Disassembles the old list and returns
 * a new one. O(n) time, O(1) extra memory (intervals are reused).
 *
 * @throws NullPointerException if either argument is null
 */
public static LinkedList<int[]> insert(LinkedList<int[]> intervals, int[] newInterval){
    if(intervals == null || newInterval == null){
        throw new NullPointerException();
    }
    LinkedList<int[]> results = new LinkedList<int[]>();
    while(!intervals.isEmpty()){
        int[] oldInterval = intervals.removeFirst();
        //new interval starts before the old interval
        if( newInterval[0] < oldInterval[0] ){
            //new interval is completely before this old one
            if(newInterval[1] < oldInterval[0]){
                results.add(newInterval);
                results.add(oldInterval);   //fixed typo: oldIntervall
                while(!intervals.isEmpty()){
                    results.add(intervals.removeFirst());
                }
                return results;
            }
            //new interval starts before and overlaps the old interval
            else if(newInterval[1] < oldInterval[1]){
                newInterval[1] = oldInterval[1];   //fixed typo: olderInterval
                results.add(newInterval);
                while(!intervals.isEmpty()){
                    results.add(intervals.removeFirst());
                }
                return results;
            }
            //otherwise the new interval entirely contains the old one: drop the old
        }
        //now know the new interval starts at or after the old start
        //new interval overlaps the end of the old interval
        else if(newInterval[0] < oldInterval[1]){
            //new interval lies within the old interval
            if(newInterval[1] < oldInterval[1]){
                results.add(oldInterval);
                while(!intervals.isEmpty()){
                    results.add(intervals.removeFirst());
                }
                return results;
            }
            //new interval extends past the old interval: absorb the old start
            else{
                newInterval[0] = oldInterval[0];
            }
        }
        //new interval is after the old interval
        else{
            results.add(oldInterval);
        }
    }
    //fixed: the new interval lies after (or absorbed) everything remaining
    //and was never emitted inside the loop - the original dropped it here
    results.add(newInterval);
    return results;
}
Complexity O(n) and Memory O(1):
since the array is sorted A[i] + A[j] > val => A[i] + A[j+c] > val
we can use a left index and right index that move towards each other to compute this value
/**
 * Counts the index pairs (i, j), i < j, of the ascending-sorted array whose
 * sum is strictly greater than val. Two-pointer scan: O(n) time, O(1) memory.
 * (fixed: the original accumulated "arr.length - right", which miscounts,
 * e.g. {1,2,3,4} with val 5; when arr[left] + arr[right] > val, every index
 * strictly between left and right also pairs with right)
 *
 * @throws NullPointerException if arr is null
 */
public static int getCountCombsGreater(int[] arr, int val){
    if(arr == null){
        throw new NullPointerException();
    }
    int count = 0;
    int left = 0;
    int right = arr.length - 1;
    while(left < right){
        if(arr[left] + arr[right] > val){
            //arr is sorted, so (left..right-1, right) all exceed val too
            count += right - left;
            right--;
        }
        else{
            left++;
        }
    }
    return count;
}
This can be done in roughly O(nm) complexity and O(nm) memory where n is the number of elements that are in all lists and m is the number of lists
Create a Set Union to track the Intersections of lists
Use a HashMap to map integers to their set
/**
 * Prints one line per group of lists that (transitively) share at least one
 * element. The first Integer of each list is treated as the list's name and
 * the remaining Integers as its elements. Union-find over list names, keyed
 * by which list first contained each element. Roughly O(nm) time and memory.
 *
 * NOTE(review): rewritten so it compiles (the original parameter type was
 * not iterable as used); semantics preserved as far as the original's intent
 * could be inferred - verify against the question.
 */
public static void printIntersections(List<LinkedList<Integer>> lists){
    HashMap<Integer,Integer> setUnionMap = new HashMap<Integer,Integer>();
    HashMap<Integer,Integer> elementMembershipMap = new HashMap<Integer,Integer>();
    //attach all the integers together and build the set union up
    for(LinkedList<Integer> list : lists){
        Iterator<Integer> iterator = list.iterator();
        if(!iterator.hasNext()){   //fixed typo: interator
            continue;
        }
        Integer name = iterator.next();
        setUnionMap.put(name, null);   //null parent == its own root
        while(iterator.hasNext()){
            Integer element = iterator.next();
            Integer owner = elementMembershipMap.get(element);
            if(owner == null){
                elementMembershipMap.put(element, name);
            }
            else{
                //union this list's group with the owning list's group
                //(fixed: the original overwrote this list's link on every
                //shared element, silently splitting groups)
                Integer theirRoot = getParent(owner, setUnionMap);
                Integer myRoot = getParent(name, setUnionMap);
                if(!myRoot.equals(theirRoot)){
                    setUnionMap.put(myRoot, theirRoot);
                }
            }
        }
    }
    //invert the set union to collect list names per group root
    //(fixed: getParent never returns null, so the original's null branch was
    //dead and its else branch dereferenced a missing collection)
    HashMap<Integer, Collection<Integer>> groupMembershipMap = new HashMap<Integer, Collection<Integer>>();
    for(Integer setName : setUnionMap.keySet()){
        Integer parent = getParent(setName, setUnionMap);
        Collection<Integer> col = groupMembershipMap.get(parent);
        if(col == null){
            col = new ArrayList<Integer>();
            groupMembershipMap.put(parent, col);
        }
        col.add(setName);
    }
    //output the list names, one group per line
    for(Collection<Integer> valueSet : groupMembershipMap.values()){   //fixed: getValues() does not exist
        StringBuilder line = new StringBuilder();
        Iterator<Integer> iterator = valueSet.iterator();
        while(iterator.hasNext()){
            if(line.length() > 0){
                line.append(", ");
            }
            line.append(String.valueOf(iterator.next()));   //fixed: String.parseInt does not exist
        }
        System.out.println(line.toString());
    }
}
//Finds the root of the group containing lookUp, path-compressing as it goes.
private static Integer getParent(Integer lookUp, HashMap<Integer,Integer> unionMap){
    Integer parentName = unionMap.get(lookUp);
    if(parentName == null){
        return lookUp;
    }
    Integer root = getParent(parentName, unionMap);
    if(!root.equals(parentName)){   //fixed: != compared Integer identity, not value
        unionMap.put(lookUp, root);
    }
    return root;
}
Simple binary search looking for the index i where arr[i] == 1 and arr[i-1] == 0, then return i (the count of leading zeros).
Complexity = O(log n), memory = O(1)
/**
 * Given an array of 0s followed by 1s, returns the number of leading 0s by
 * binary-searching for the first 1. O(log n) time, O(1) memory.
 *
 * @throws NullPointerException if arr is null
 */
public static int countZeros(int[] arr){
    //handle base cases
    if(arr == null){
        throw new NullPointerException();
    }
    if(arr.length == 0){
        return 0;
    }
    if(arr[0] == 1){
        return 0;
    }
    if(arr[arr.length-1] == 0){   //fixed: bare "length" did not resolve
        return arr.length;
    }
    //invariant: arr[lo] == 0 and arr[hi] == 1; shrink until they are adjacent
    int lo = 0;
    int hi = arr.length - 1;
    int mid;
    while(hi - lo > 1){
        //overflow-safe midpoint; the original (lo>>1)+(hi>>1) rounds down
        //twice and can yield mid == lo, looping forever (e.g. {0,0,1,1})
        mid = lo + ((hi - lo) >> 1);
        if(arr[mid] == 1){
            hi = mid;
        }
        else{
            lo = mid;
        }
    }
    return hi;   //index of the first 1 == count of leading zeros
}
O(n) runtime and O(n) memory:
/**
 * Returns true when two distinct elements of arr sum to num.
 * Single pass with a set of previously seen values: O(n) time, O(n) memory.
 *
 * @throws NullPointerException if arr is null
 */
public boolean hasTwoNumbersThatSumValue(int[] arr, int num){
    if(arr == null){
        throw new NullPointerException();
    }
    HashSet<Integer> seen = new HashSet<Integer>();
    for(int value : arr){
        //a match means some earlier element complements this one
        if(seen.contains(num - value)){
            return true;
        }
        seen.add(value);
    }
    return false;
}
/**
 * Returns the substring of str from start (inclusive) to end (exclusive),
 * mirroring String.substring semantics.
 *
 * @throws NullPointerException if str is null
 * @throws IllegalArgumentException if the bounds are invalid
 */
public static String substring(String str, int start, int end){
    if(str == null){
        throw new NullPointerException();
    }
    //fixed: end is exclusive, so end == str.length() must be allowed (the
    //original rejected it); also reject negative start, which previously
    //slipped through and crashed in charAt
    if(start < 0 || start > end || end > str.length()){
        throw new IllegalArgumentException();
    }
    StringBuilder substring = new StringBuilder(end - start);
    for(int i = start; i < end; i++){
        substring.append(str.charAt(i));
    }
    return substring.toString();
}
All that has to be done is to check that every i element is greater than i-1. Count the instances of that and return true iff the count == 1.
O(n) runtime and O(1) memory:
public static boolean sortIn1Swap(int[] arr){
if(arr == null){
throw new NullPointerException();
}
if(arr.length < 2){
throw new IllegalArgumentException();
}
//flag that tracks if the single instance of a misordered element has been found
boolean needsASwap = false;
for(int i = 1; i < arr.length; i++){
if(arr[i] < arr[i-1]){
//if a misordered element has already been found, return false
if(needsASwap){
return false;
}
needsASwap = true;
}
}
return needsASwap
}
Will operate in O(n^(4/3)) time with O(n^(1/3)) memory
/**
 * Returns every integer in [2, n] expressible as a^3 + b^3 (positive a <= b)
 * in at least two distinct ways - e.g. 1729 = 1^3+12^3 = 9^3+10^3.
 * Roughly O(n^(4/3)) time, O(n^(1/3)) memory.
 *
 * NOTE(review): the original started scanning at the arbitrary value 9 and
 * counted an a != b representation twice (once from each side), wrongly
 * reporting single-representation values like 9 = 1^3 + 2^3; restricting to
 * a <= b (2a^3 <= candidate) counts each unordered pair once - verify this
 * matches the question's intent.
 */
public static ArrayList<Integer> getAllPaired2Sums(int n){
    //precompute all cubes up to n
    HashSet<Integer> cubes = new HashSet<Integer>();
    int index = 0;
    int cube = 0;
    while(cube < n){
        index++;
        cube = index * index * index;
        cubes.add(cube);
    }
    ArrayList<Integer> results = new ArrayList<Integer>();
    //scan candidates, counting unordered cube-pair representations
    outer:
    for(index = 2; index <= n; index++){
        boolean hasPair = false;
        for(int i = 1; (cube = i * i * i) * 2 <= index; i++){
            if(cubes.contains(index - cube)){
                if(hasPair){
                    //second distinct representation found
                    results.add(index);
                    continue outer;
                }
                hasPair = true;
            }
        }
    }
    return results;
}
O(n) complexity and O(1) memory consumption
//Simple holder for the minimum and maximum found by a scan.
public static class MinMaxPair{
    public int min = Integer.MAX_VALUE;
    public int max = Integer.MIN_VALUE;
}
/**
 * Returns the minimum and maximum of arr in a single pass.
 * O(n) time, O(1) memory.
 *
 * @throws NullPointerException if arr is null
 * @throws IllegalArgumentException if arr is empty
 */
public MinMaxPair getMinMax(int[] arr){
    if(arr == null){
        throw new NullPointerException();
    }
    if(arr.length == 0){
        throw new IllegalArgumentException();
    }
    MinMaxPair bounds = new MinMaxPair();
    bounds.min = arr[0];
    bounds.max = arr[0];
    for(int index = 1; index < arr.length; index++){
        int candidate = arr[index];
        if(candidate < bounds.min){
            bounds.min = candidate;
        }
        else if(candidate > bounds.max){
            bounds.max = candidate;
        }
    }
    return bounds;
}
/**
 * Returns the maximum nesting depth of parentheses in the input, ignoring
 * all other characters. O(n) time, O(1) memory.
 *
 * @throws IllegalArgumentException if input is null or the parentheses are unbalanced
 */
public static int nestedParenthesisDepth(String input) throws IllegalArgumentException {
    if(input == null){
        throw new IllegalArgumentException();
    }
    int openParenCount = 0;
    int maxOpenParen = 0;
    for(int i = 0; i < input.length(); i++){
        char c = input.charAt(i);
        if('(' == c){
            openParenCount++;
            if(openParenCount > maxOpenParen){
                maxOpenParen = openParenCount;   //fixed typo: openParentCount
            }
        }
        else if(')' == c){
            openParenCount--;
            //more closers than openers so far, e.g. ")("
            if(openParenCount < 0){
                throw new IllegalArgumentException();
            }
        }
    }
    //an opener was never closed, e.g. "a(b"
    if(openParenCount > 0){
        throw new IllegalArgumentException();
    }
    return maxOpenParen;
}
Edit: added fix to catch "a(b" cases. Thanks anonymous and JSDUDE
- zortlord June 04, 2015
Though much more readable than mine, String.split(string) operates in O(n) time already. So, effectively, your runtime is O(n+m) complexity where n is the number of characters and m is the number of tokens produced by the split. It is not as fast as reading the original String 1 time (character by character) and handling each token at that time ( O(n) complexity).
- zortlord July 17, 2015