Citigroup Interview Question
Financial Software DevelopersCountry: United States
The implementation seems good for a cache; however, I think this is not a Least Recently Used cache. For example, in a multithreaded environment, Thread1 adds one entry and then continuously reads its value via get(). Meanwhile, other threads are putting values into the cache, and probably not reading them back at all. Now the cache reaches its maximum limit and the oldest entry needs to be deleted. Here "oldest" means the entry that has not been used for the longest time, not the one that has been in the cache the longest. Since the first (and thus oldest) entry is being read very frequently, it should not be deleted from the cache.
In the get() method you need to update the queue to move up the recently used key to top of queue.
agreed with Samuel
map.get(key)
queue.remove(key)
queue.insert(key)
(with the if conditions in place)
The above code seems to have issues:
a. LRU would mean that if a key is being referred to again and again, it should be updated in the data structure with a new timestamp; the above code is not handling that.
b. The above code is using a ConcurrentHashMap and a ConcurrentLinkedQueue, but what about the scenario when a composite operation needs to be performed — for example, you have to delete from the queue only if the key is present in the map, or insert into the map after inserting into the queue?
c. This code is not considering an eviction policy, or how a key can be explicitly unloaded from the cache.
The code below, which I have developed, handles those scenarios.
package cache;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
 * A size-bounded LRU cache: a ConcurrentHashMap holds the entries and a
 * ConcurrentLinkedQueue tracks recency (head = least recently used,
 * tail = most recently used). Composite map+queue operations are guarded
 * by a ReentrantReadWriteLock, since the two structures must be updated
 * together atomically.
 *
 * <p>Bug fixes vs. the original:
 * <ul>
 *   <li>{@code ConcurrentHashMap.contains(Object)} is the legacy Hashtable
 *       method that tests VALUES, not keys — every check below meant
 *       {@code containsKey}.</li>
 *   <li>{@code getElement} reorders the recency queue, so it must hold the
 *       WRITE lock: two threads holding the shared read lock could
 *       interleave the remove/add pair and corrupt the queue.</li>
 * </ul>
 */
public class LRUCache<K,V> {
private final ConcurrentLinkedQueue<K> concurrentLinkedQueue = new ConcurrentLinkedQueue<K>();
private final ConcurrentHashMap<K,V> concurrentHashMap = new ConcurrentHashMap<K, V>();
private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
private final Lock readLock = readWriteLock.readLock();
private final Lock writeLock = readWriteLock.writeLock();
final int maxSize; // capacity; evictions start once the queue reaches this size
/**
 * Creates a cache that evicts the least recently used entry once it
 * holds {@code MAX_SIZE} entries.
 */
public LRUCache(final int MAX_SIZE){
this.maxSize=MAX_SIZE;
}
/**
 * Returns the value for {@code key}, or {@code null} if absent, and
 * promotes the key to most recently used (tail of the queue).
 */
public V getElement(K key){
// Write lock, not read lock: this method mutates the queue.
writeLock.lock();
try {
V v=null;
if(concurrentHashMap.containsKey(key)){
// Move the key to the tail of the queue (most recently used).
concurrentLinkedQueue.remove(key);
v= concurrentHashMap.get(key);
concurrentLinkedQueue.add(key);
}
return v;
}finally{
writeLock.unlock();
}
}
/**
 * Removes {@code key} from both map and queue; returns the removed
 * value, or {@code null} if the key was absent.
 */
public V removeElement(K key){
writeLock.lock();
try {
V v=null;
if(concurrentHashMap.containsKey(key)){
v=concurrentHashMap.remove(key);
concurrentLinkedQueue.remove(key);
}
return v;
} finally {
writeLock.unlock();
}
}
/**
 * Inserts (or replaces) {@code key -> value}, evicting least recently
 * used entries from the head of the queue until there is room.
 * Returns the value that was stored.
 */
public V addElement(K key,V value){
writeLock.lock();
try {
if(concurrentHashMap.containsKey(key)){
// Re-insert below will move the key to the tail; drop the stale slot.
concurrentLinkedQueue.remove(key);
}
while(concurrentLinkedQueue.size() >=maxSize){
// Head of the queue is the least recently used key.
K queueKey=concurrentLinkedQueue.poll();
concurrentHashMap.remove(queueKey);
}
concurrentLinkedQueue.add(key);
concurrentHashMap.put(key, value);
return value;
} finally{
writeLock.unlock();
}
}
}
/**
 * LRU cache of Integer -> String built on LinkedHashMap's access order.
 *
 * <p>Bug fix vs. the original: the class called {@code super(size, 0.75f,
 * true)} and overrode {@code removeEldestEntry}, but the
 * {@code extends LinkedHashMap} clause was missing entirely, so it did not
 * compile. With accessOrder = true the map iterates from least to most
 * recently accessed, so the "eldest" entry is exactly the LRU one.
 */
public class LRUCache extends java.util.LinkedHashMap<Integer, String> {
private static final long serialVersionUID = 1L;
// Maximum number of entries before the eldest is evicted.
private int cacheSize;
public LRUCache(int size) {
// accessOrder = true: get()/put() move the entry to the tail.
super(size, 0.75f, true);
this.cacheSize = size;
}
@Override
protected boolean removeEldestEntry(
java.util.Map.Entry<Integer, String> eldest) {
// remove the oldest element when size limit is reached
return size() > cacheSize;
}
}
Hi, you can use LinkedHashMap to implement an LRU cache in Java.
Here is the code ....
package algorithms;
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Memoizing LRU cache for an expensive int -> int function, built on
 * LinkedHashMap.
 *
 * <p>Bug fix vs. the original (the defect pointed out in the comment
 * below): the map was built with the plain capacity constructor, i.e.
 * INSERTION order, and re-inserting an existing key does not change
 * insertion order — so a frequently read key was still evicted first
 * (FIFO, not LRU). The three-argument constructor with accessOrder = true
 * makes both get() and put() move the entry to the tail, giving true LRU
 * eviction.
 */
public class LRU {
private static final int MAX_ENTRIES = 3;
// Access-ordered map: iteration runs least- to most-recently accessed,
// so the eldest entry handed to removeEldestEntry is the LRU one.
Map<Integer,Integer> m = new LinkedHashMap<Integer, Integer>(MAX_ENTRIES + 1, 0.75f, true){
// If this returns true the eldest (= least recently accessed) entry is removed.
@Override
protected boolean removeEldestEntry(Map.Entry<Integer, Integer> eldest) {
return size() > MAX_ENTRIES;
}
};
// model function — stands in for an expensive computation
int getValue(int x){
//some complex calculations
return ++x;
}
//to get value from cache; also refreshes recency via access order
int getCacheValue(int x){
return m.get(x);
}
/**
 * Returns f(x): from the cache on a hit (refreshing its recency),
 * otherwise computed and stored, evicting the LRU entry if full.
 */
int get(int x){
int result=0;
if(m.containsKey(x)){
result=getCacheValue(x);
}else{
result=getValue(x);
}
// With accessOrder = true this put also moves x to the tail.
m.put(x, result);
return result;
}
public static void main(String[] args) {
LRU obj = new LRU();
for (int i = 1; i <= 5; i++) {
obj.get(i);
System.out.println(obj.m);
}
}
}
This is great — just that it has a small bug. The get(int x) method is blindly calling m.put(x, result). It has to first remove this entry from the LinkedHashMap and then call put(), so that this key becomes the tail in the insertion order. According to the Javadoc for LinkedHashMap ==> "Note that insertion order is not affected if a key is re-inserted into the map. (A key k is reinserted into a map m if m.put(k, v) is invoked when m.containsKey(k) would return true immediately prior to the invocation.)".
package misc;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
* @author mayurjain
*
*/
/**
 * A minimal "recency queue": it stores only the Strings themselves (no
 * values), with the head holding the least recently requested entry and
 * the tail the most recently requested one.
 */
public class LruCache {
    /** Maximum number of entries retained in the queue. */
    private final int maxSize;
    /** Recency order: head = least recent, tail = most recent. */
    private ConcurrentLinkedQueue<String> queue = new ConcurrentLinkedQueue<String>();

    public LruCache(int size) {
        this.maxSize = size;
    }

    /**
     * Appends {@code data} at the tail, first polling entries off the
     * head until there is room for one more.
     */
    public void insertCache(String data) {
        for (; queue.size() >= maxSize; ) {
            queue.poll();
        }
        queue.add(data);
    }

    /**
     * Returns {@code data} if it was already cached (a hit), otherwise
     * {@code null}; in either case {@code data} ends up at the tail of
     * the queue as the most recently requested entry.
     */
    public String getfromCache(String data) {
        String hit = null;
        boolean present = queue.contains(data);
        if (present) {
            queue.remove(data);
            hit = data;
        }
        insertCache(data);
        return hit;
    }

    /** Dumps the current queue contents (head first) to stdout. */
    public void printqueue() {
        System.out.println(queue);
    }

    public static void main(String[] args) {
        LruCache cache = new LruCache(3);
        for (String name : new String[] {"mayur", "sumit", "amit"}) {
            cache.getfromCache(name);
        }
        cache.printqueue();
        cache.getfromCache("sumit");
        cache.printqueue();
        cache.getfromCache("ankit");
        cache.printqueue();
    }
}
Are enqueue and dequeue operations atomic in combination on a ConcurrentLinkedQueue?
For example, let's say we have 2 threads, and t1 finds the size to be at maximum and goes on to remove (dequeue) an element, which happens at the head. Before it can enqueue its element, t2 comes in and adds one (the size having been reduced by t1's dequeue). As a result, t1 is not able to add again.
Adding into the cache is a multi-step operation of checking the size, removing the first element, and adding a new one. Doesn't it need to be atomic (synchronized)?
Please suggest.
The following code implements an LRU cache using a linked list.
package linkList;
import java.util.*;
/*
 * We are using a linked list together with a HashMap for LRU.
 * The reason is that a HashMap can store keys and values, but how do we get
 * information about the least recently used cache value? For this we need to
 * keep track of all data inserted into the map by using a linked list. When
 * inserting new values, add the value to the top of the linked list. When the
 * key/value is already present, refresh it by removing it from the linked
 * list and re-adding it at the top of the linked list.
 * */
/**
 * Holder for a hand-rolled LRU cache: a HashMap gives O(1) key lookup and
 * a doubly linked list with sentinel head/tail nodes tracks recency
 * (head.next = most recently used, tail.prev = least recently used).
 *
 * <p>Bug fixes vs. the original:
 * <ul>
 *   <li>Eviction called {@code remove(tail.prev)} and THEN
 *       {@code map.remove(tail.prev.key)} — but unlinking rewires
 *       {@code tail.prev}, so the map entry of the WRONG (still-live) node
 *       was deleted, leaving the list and map inconsistent. The LRU node
 *       must be captured before unlinking.</li>
 *   <li>Removed the unused {@code private static final int String = 0;}
 *       field, which served no purpose and shadowed nothing but confused
 *       readers.</li>
 * </ul>
 */
public class LRUImpl {
/** Minimal cache abstraction: read a value, or store one. */
public interface CacheStrategy<K, T>{
T get(K key);
void put(K key,T data);
}
// NOTE(review): kept as a non-static inner class to preserve the
// outer.new instantiation syntax for existing callers; it uses no
// outer state and could be static.
class CacheStrategyLRU<K, T> implements CacheStrategy<K, T> {
/** Doubly linked list node holding one cache entry. */
class Node{
K key;
T data;
Node next;
Node prev;
public Node(K key, T data){
this.key = key;
this.data = data;
}
}
Node head,tail; // sentinels: they never carry a real entry
Map< K, Node> map;
int maxsize;
public CacheStrategyLRU(int mxsize){
this.maxsize = mxsize;
map = new HashMap<K ,Node>();
head = new Node(null,null);
tail = new Node(null,null);
head.next=tail;
tail.prev=head;
}
/** Links node directly after head, marking it most recently used. */
private void attach(Node head,Node node){
node.prev = head;
node.next = head.next;
head.next.prev=node;
head.next = node;
}
/** Unlinks node from the list (node's own pointers are left stale). */
private void remove(Node node){
node.prev.next = node.next;
node.next.prev = node.prev;
}
/**
 * Returns the cached value or null, promoting the entry to most
 * recently used on a hit.
 */
@Override
public T get(K key) {
Node node = map.get(key);
if(node==null){
return null;
}
if(map.size()==1){
// Only entry: it is trivially the most recently used already.
return node.data;
}
remove(node);
attach(head,node);
return node.data;
}
/**
 * Inserts or updates a value, evicting the least recently used entry
 * (tail.prev) when the cache is at capacity.
 */
@Override
public void put(K key, T data) {
if(maxsize<=0){
return;
}
Node node = map.get(key);
if(node!=null){
remove(node);
attach(head,node);
node.data = data;
}else{
if(map.size() >= maxsize){
// Capture the LRU node BEFORE unlinking: remove() rewires
// tail.prev, so reading tail.prev.key afterwards would
// delete the wrong map entry (the original bug).
Node lru = tail.prev;
remove(lru);
map.remove(lru.key);
}
Node nd = new Node(key,data);
map.put(key, nd);
attach(head,nd);
}
}
}
}
import java.util.LinkedHashMap;
import java.util.Map;
/**
 * Generic LRU cache built on LinkedHashMap's access order: with
 * accessOrder = true the eldest entry is the least recently accessed one,
 * so removeEldestEntry implements LRU eviction directly.
 *
 * <p>Fixes vs. the original: {@code newinstance} constructed a RAW
 * {@code LRUCacheMap}, silently discarding K/V type safety (unchecked
 * warning); the IDE-specific {@code @SuppressWarnings("compatibility:…")}
 * annotation was tool noise and is removed.
 */
public class LRUCacheMap<K, V> extends LinkedHashMap<K, V>{
private static final long serialVersionUID = -4607438325401116607L;
// Maximum number of entries before the eldest (LRU) one is evicted.
int size = 0;
public LRUCacheMap(int size) {
// accessOrder = true: get()/put() move the entry to the tail,
// so iteration order is least- to most-recently accessed.
super(size, 0.25f, true);
this.size = size;
}
/** Evict the least recently used entry once capacity is exceeded. */
@Override
protected boolean removeEldestEntry(Map.Entry<K, V> entry) {
return size() > size;
}
/** Type-safe factory; the type parameters are inferred at the call site. */
public static <K,V>LRUCacheMap<K, V> newinstance(int size){
return new LRUCacheMap<K, V>(size);
}
/** Adjusts the capacity; takes effect on the next insertion. */
public void setMaxSize(int maxsize){
this.size = maxsize;
}
public static void main(String[] args) {
LRUCacheMap<String,String> lruCache = LRUCacheMap.newinstance(3);
lruCache.put("1", "2");
lruCache.put("2", "3");
lruCache.put("3", "4");
lruCache.put("4", "2");
lruCache.put("5", "1");
System.out.println(lruCache);
}
}
class LRUCache {
static int size=0,capacity=0;
LinkedHashMap<Integer, Integer> cacheStore;
/*Inititalize an LRU cache with size N */
public LRUCache(int N) {
size=0;
capacity=N;
cacheStore= new LinkedHashMap<Integer, Integer>(N);
}
/*Returns the value of the key x if
present else returns -1 */
public synchronized int get(int x) {
if(cacheStore.get(x)!=null){
int num=cacheStore.get(x);
cacheStore.remove(x);
cacheStore.put(x, num);
return num;
}
return -1;
}
/*Sets the key x with value y in the LRU cache */
public synchronized void set(int x, int y) {
int value=get(x);
if(value!=-1){
cacheStore.remove(x);
cacheStore.put(x, y);
return;
}
if(size<capacity){
cacheStore.put(x, y);
size++;
}
else{
Iterator<Integer> itr=cacheStore.keySet().iterator();
cacheStore.remove(itr.next());
cacheStore.put(x, y);
}
}
}
Simple Java implementation of LRU cache :-
/**
 * Simple generic LRU cache on LinkedHashMap's access order
 * (accessOrder = true makes the eldest entry the least recently accessed).
 *
 * <p>Bug fix vs. the original: the "constructor" was named
 * {@code LRUCacheImplementationUsingLinkedHashMap}, which does not match
 * the class name, so the class did not compile — a constructor must be
 * named {@code LRUCache}.
 */
public class LRUCache<K, V> extends LinkedHashMap<K, V> {
    private static final long serialVersionUID = 1L;
    // Maximum number of entries before the eldest is evicted.
    int capacity;
    public LRUCache(int capacity) {
        // capacity + 1 with load factor 1.0 avoids rehashing; accessOrder = true.
        super(capacity + 1, 1.0f, true);
        this.capacity = capacity;
    }
    @Override
    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
        return (size() > capacity);
    }
}
- java.interviews.questions September 13, 2013