- */
-package com.meetup.memcached;
-
-import java.util.*;
-import java.util.zip.*;
-import java.nio.*;
-import java.net.InetAddress;
-import java.nio.charset.*;
-import java.nio.channels.*;
-import java.nio.channels.spi.*;
-import java.io.*;
-import java.net.URLEncoder;
-
-import org.apache.log4j.Logger;
-
-/**
- * This is a Memcached client for the Java platform available from
- * http://www.danga.com/memcached/.
- *
- * Supports setting, adding, replacing, and deleting compressed/uncompressed and
- * serialized objects in memcached (objects of native classes can be stored as strings).
- *
- * Now pulls SockIO objects from SockIOPool, which is a connection pool. The server failover
- * has also been moved into the SockIOPool class.
- * This pool needs to be initialized prior to the client working. See javadocs from SockIOPool.
- *
- * Some examples of use follow.
- * To create cache client object and set params:
- *
- * MemcachedClient mc = new MemcachedClient();
- *
- * // compression is enabled by default
- * mc.setCompressEnable(true);
- *
- * // set compression threshold to 4 KB (default: 30 KB)
- * mc.setCompressThreshold(4096);
- *
- * // turn on storing primitive types as a string representation
- * // Should not do this in most cases.
- * mc.setPrimitiveAsString(true);
- *
- * To store an object:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "cacheKey1";
- * Object value = SomeClass.getObject();
- * mc.set(key, value);
- *
- * To store an object using a custom server hashCode:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "cacheKey1";
- * Object value = SomeClass.getObject();
- * Integer hash = new Integer(45);
- * mc.set(key, value, hash);
- *
- * The set method shown above will always set the object in the cache.
- * The add and replace methods store the object in the same way, but each succeeds
- * only under one condition, as shown in the example below.
- *
- * - add -- will store the object only if the server does not have an entry for this key
- * - replace -- will store the object only if the server already has an entry for this key
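- *
- * For example, assuming key "cacheKey1" is not yet in the cache:
- *
- * MemcachedClient mc = new MemcachedClient();
- * Object value = SomeClass.getObject();
- * mc.add("cacheKey1", value);     // stored, no entry existed yet
- * mc.add("cacheKey1", value);     // not stored, entry now exists
- * mc.replace("cacheKey1", value); // stored, entry exists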
- *
- * To delete a cache entry:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "cacheKey1";
- * mc.delete(key);
- *
- * To delete a cache entry using a custom hash code:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "cacheKey1";
- * Integer hash = new Integer(45);
- * mc.delete(key, hash, null);
- *
- * To store a counter and then increment or decrement that counter:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "counterKey";
- * mc.storeCounter(key, new Integer(100));
- * System.out.println("counter after adding 1: " mc.incr(key));
- * System.out.println("counter after adding 5: " mc.incr(key, 5));
- * System.out.println("counter after subtracting 4: " mc.decr(key, 4));
- * System.out.println("counter after subtracting 1: " mc.decr(key));
- *
- * To store a counter and then increment or decrement that counter with custom hash:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "counterKey";
- * Integer hash = new Integer(45);
- * mc.storeCounter(key, new Integer(100), hash);
- * System.out.println("counter after adding 1: " mc.incr(key, 1, hash));
- * System.out.println("counter after adding 5: " mc.incr(key, 5, hash));
- * System.out.println("counter after subtracting 4: " mc.decr(key, 4, hash));
- * System.out.println("counter after subtracting 1: " mc.decr(key, 1, hash));
- *
- * To retrieve an object from the cache:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "key";
- * Object value = mc.get(key);
- *
- * To retrieve an object from the cache with custom hash:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String key = "key";
- * Integer hash = new Integer(45);
- * Object value = mc.get(key, hash);
- *
- * To retrieve multiple objects from the cache:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String[] keys = { "key", "key1", "key2" };
- * Map<String,Object> values = mc.getMulti(keys);
- *
- * To retrieve multiple objects from the cache with custom hashing:
- *
- * MemcachedClient mc = new MemcachedClient();
- * String[] keys = { "key", "key1", "key2" };
- * Integer[] hashes = { new Integer(45), new Integer(32), new Integer(44) };
- * Map<String,Object> values = mc.getMulti(keys, hashes);
- *
- * To flush all items in server(s)
- *
- * MemcachedClient mc = new MemcachedClient();
- * mc.flushAll();
- *
- * To get stats from server(s)
- *
- * MemcachedClient mc = new MemcachedClient();
- * Map stats = mc.stats();
- *
- *
- * @author greg whalin
- * @author Richard 'toast' Russo
- * @author Kevin Burton
- * @author Robert Watts
- * @author Vin Chawla
- * @version 1.5
- */
-public class MemcachedClient {
-
- // logger
- private static Logger log =
- Logger.getLogger( MemcachedClient.class.getName() );
-
- // return codes
- private static final String VALUE = "VALUE"; // start of value line from server
- private static final String STATS = "STAT"; // start of stats line from server
- private static final String ITEM = "ITEM"; // start of item line from server
- private static final String DELETED = "DELETED"; // successful deletion
- private static final String NOTFOUND = "NOT_FOUND"; // record not found for delete or incr/decr
- private static final String STORED = "STORED"; // successful store of data
- private static final String NOTSTORED = "NOT_STORED"; // data not stored
- private static final String OK = "OK"; // success
- private static final String END = "END"; // end of data from server
-
- private static final String ERROR = "ERROR"; // invalid command name from client
- private static final String CLIENT_ERROR = "CLIENT_ERROR"; // client error in input line - invalid protocol
- private static final String SERVER_ERROR = "SERVER_ERROR"; // server error
-
- private static final byte[] B_END = "END\r\n".getBytes();
- private static final byte[] B_NOTFOUND = "NOT_FOUND\r\n".getBytes();
- private static final byte[] B_DELETED = "DELETED\r\n".getBytes();
- private static final byte[] B_STORED = "STORED\r\n".getBytes();
-
- // default compression threshold
- private static final int COMPRESS_THRESH = 30720;
-
- // values for cache flags
- public static final int MARKER_BYTE = 1;
- public static final int MARKER_BOOLEAN = 8192;
- public static final int MARKER_INTEGER = 4;
- public static final int MARKER_LONG = 16384;
- public static final int MARKER_CHARACTER = 16;
- public static final int MARKER_STRING = 32;
- public static final int MARKER_STRINGBUFFER = 64;
- public static final int MARKER_FLOAT = 128;
- public static final int MARKER_SHORT = 256;
- public static final int MARKER_DOUBLE = 512;
- public static final int MARKER_DATE = 1024;
- public static final int MARKER_STRINGBUILDER = 2048;
- public static final int MARKER_BYTEARR = 4096;
- public static final int F_COMPRESSED = 2;
- public static final int F_SERIALIZED = 8;
-
- // flags
- private boolean sanitizeKeys;
- private boolean primitiveAsString;
- private boolean compressEnable;
- private long compressThreshold;
- private String defaultEncoding;
-
- // pool instance
- private SockIOPool pool;
-
- // which pool to use
- private String poolName;
-
- // optional passed in classloader
- private ClassLoader classLoader;
-
- // optional error handler
- private ErrorHandler errorHandler;
-
- /**
- * Creates a new instance of MemcachedClient.
- */
- public MemcachedClient() {
- init();
- }
-
- /**
- * Creates a new instance of MemcachedClient
- * accepting a passed in pool name.
- *
- * @param poolName name of SockIOPool
- */
- public MemcachedClient( String poolName ) {
- this.poolName = poolName;
- init();
- }
-
- /**
- * Creates a new instance of MemcachedClient,
- * accepting a passed in ClassLoader.
- *
- * @param classLoader ClassLoader object.
- */
- public MemcachedClient( ClassLoader classLoader ) {
- this.classLoader = classLoader;
- init();
- }
-
- /**
- * Creates a new instance of MemcachedClient,
- * accepting a passed in ClassLoader and a passed
- * in ErrorHandler.
- *
- * @param classLoader ClassLoader object.
- * @param errorHandler ErrorHandler object.
- */
- public MemcachedClient( ClassLoader classLoader, ErrorHandler errorHandler ) {
- this.classLoader = classLoader;
- this.errorHandler = errorHandler;
- init();
- }
-
- /**
- * Creates a new instance of MemcachedClient,
- * accepting a passed in ClassLoader, ErrorHandler,
- * and SockIOPool name.
- *
- * @param classLoader ClassLoader object.
- * @param errorHandler ErrorHandler object.
- * @param poolName SockIOPool name
- */
- public MemcachedClient( ClassLoader classLoader, ErrorHandler errorHandler, String poolName ) {
- this.classLoader = classLoader;
- this.errorHandler = errorHandler;
- this.poolName = poolName;
- init();
- }
-
- /**
- * Initializes client object to defaults.
- *
- * This enables compression and sets the compression threshold to 30 KB.
- */
- private void init() {
- this.sanitizeKeys = true;
- this.primitiveAsString = false;
- this.compressEnable = true;
- this.compressThreshold = COMPRESS_THRESH;
- this.defaultEncoding = "UTF-8";
- this.poolName = ( this.poolName == null ) ? "default" : this.poolName;
-
- // get a pool instance to work with for the life of this instance
- this.pool = SockIOPool.getInstance( poolName );
- }
-
- /**
- * Sets an optional ClassLoader to be used for
- * serialization.
- *
- * @param classLoader
- */
- public void setClassLoader( ClassLoader classLoader ) {
- this.classLoader = classLoader;
- }
-
- /**
- * Sets an optional ErrorHandler.
- *
- * @param errorHandler
- */
- public void setErrorHandler( ErrorHandler errorHandler ) {
- this.errorHandler = errorHandler;
- }
-
- /**
- * Enables/disables sanitizing keys by URLEncoding.
- *
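- * For example (illustrative key name), with sanitizing enabled a key containing
- * spaces is URL-encoded before being sent to the server:
- *
- *   mc.set("user profile 42", value);   // transmitted as key "user+profile+42"
- *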
- * @param sanitizeKeys if true, then URLEncode all keys
- */
- public void setSanitizeKeys( boolean sanitizeKeys ) {
- this.sanitizeKeys = sanitizeKeys;
- }
-
- /**
- * Enables storing primitive types as their String values.
- *
- * @param primitiveAsString if true, then store all primitives as their string value.
- */
- public void setPrimitiveAsString( boolean primitiveAsString ) {
- this.primitiveAsString = primitiveAsString;
- }
-
- /**
- * Sets default String encoding when storing primitives as Strings.
- * Default is UTF-8.
- *
- * @param defaultEncoding
- */
- public void setDefaultEncoding( String defaultEncoding ) {
- this.defaultEncoding = defaultEncoding;
- }
-
- /**
- * Enable storing compressed data, provided it meets the threshold requirements.
- *
- * If enabled, data will be stored in compressed form if it is
- * longer than the threshold length set with setCompressThreshold(int)
- *
- * The default is that compression is enabled.
- *
- * Even if compression is disabled, compressed data will be automatically
- * decompressed.
- *
- * @param compressEnable true to enable compression, false to disable compression
- */
- public void setCompressEnable( boolean compressEnable ) {
- this.compressEnable = compressEnable;
- }
-
- /**
- * Sets the required length for data to be considered for compression.
- *
- * If the length of the data to be stored does not exceed this value, it will
- * not be compressed.
- *
- * This defaults to 30 KB.
- *
- * @param compressThreshold required length of data to consider compression
- */
- public void setCompressThreshold( long compressThreshold ) {
- this.compressThreshold = compressThreshold;
- }
-
- /**
- * Checks to see if key exists in cache.
- *
- * @param key the key to look for
- * @return true if key found in cache, false if not (or if cache is down)
- */
- public boolean keyExists( String key ) {
- return ( this.get( key, null, true ) != null );
- }
-
- /**
- * Deletes an object from cache given cache key.
- *
- * @param key the key to be removed
- * @return true, if the data was deleted successfully
- */
- public boolean delete( String key ) {
- return delete( key, null, null );
- }
-
- /**
- * Deletes an object from cache given cache key and expiration date.
- *
- * @param key the key to be removed
- * @param expiry when to expire the record.
- * @return true, if the data was deleted successfully
- */
- public boolean delete( String key, Date expiry ) {
- return delete( key, null, expiry );
- }
-
- /**
- * Deletes an object from cache given cache key, a delete time, and an optional hashcode.
- *
- * The item is immediately made non-retrievable.
- * Keep in mind that {@link #add(String, Object) add} and {@link #replace(String, Object) replace}
- * will fail when used with the same key until the server reaches the
- * specified time. However, {@link #set(String, Object) set} will succeed,
- * and the new value will not be deleted.
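- *
- * For example (illustrative sketch; the key and delay are arbitrary):
- *
- *   mc.delete("sessionKey", null, new Date(System.currentTimeMillis() + 30000));
- *   mc.add("sessionKey", value);   // fails until the delete time is reached
- *   mc.set("sessionKey", value);   // succeeds immediately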
- *
- * @param key the key to be removed
- * @param hashCode if not null, then the int hashcode to use
- * @param expiry when to expire the record.
- * @return true, if the data was deleted successfully
- */
- public boolean delete( String key, Integer hashCode, Date expiry ) {
-
- if ( key == null ) {
- log.error( "null value for key passed to delete()" );
- return false;
- }
-
- try {
- key = sanitizeKey( key );
- }
- catch ( UnsupportedEncodingException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnDelete( this, e, key );
-
- log.error( "failed to sanitize your key!", e );
- return false;
- }
-
- // get SockIO obj from hash or from key
- SockIOPool.SockIO sock = pool.getSock( key, hashCode );
-
- // return false if unable to get SockIO obj
- if ( sock == null ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnDelete( this, new IOException( "no socket to server available" ), key );
- return false;
- }
-
- // build command
- StringBuilder command = new StringBuilder( "delete " ).append( key );
- if ( expiry != null )
- command.append( " " + expiry.getTime() / 1000 );
-
- command.append( "\r\n" );
-
- try {
- sock.write( command.toString().getBytes() );
- sock.flush();
-
- // if we get appropriate response back, then we return true
- String line = sock.readLine();
- if ( DELETED.equals( line ) ) {
- if ( log.isInfoEnabled() )
- log.info( "++++ deletion of key: " + key + " from cache was a success" );
-
- // return sock to pool and bail here
- sock.close();
- sock = null;
- return true;
- }
- else if ( NOTFOUND.equals( line ) ) {
- if ( log.isInfoEnabled() )
- log.info( "++++ deletion of key: " + key + " from cache failed as the key was not found" );
- }
- else {
- log.error( "++++ error deleting key: " + key );
- log.error( "++++ server response: " + line );
- }
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnDelete( this, e, key );
-
- // exception thrown
- log.error( "++++ exception thrown while writing bytes to server on delete" );
- log.error( e.getMessage(), e );
-
- try {
- sock.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket : " + sock.toString() );
- }
-
- sock = null;
- }
-
- if ( sock != null ) {
- sock.close();
- sock = null;
- }
-
- return false;
- }
-
- /**
- * Stores data on the server; only the key and the value are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @return true, if the data was successfully stored
- */
- public boolean set( String key, Object value ) {
- return set( "set", key, value, null, null, primitiveAsString );
- }
-
- /**
- * Stores data on the server; only the key and the value are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param hashCode if not null, then the int hashcode to use
- * @return true, if the data was successfully stored
- */
- public boolean set( String key, Object value, Integer hashCode ) {
- return set( "set", key, value, null, hashCode, primitiveAsString );
- }
-
- /**
- * Stores data on the server; the key, value, and an expiration time are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param expiry when to expire the record
- * @return true, if the data was successfully stored
- */
- public boolean set( String key, Object value, Date expiry ) {
- return set( "set", key, value, expiry, null, primitiveAsString );
- }
-
- /**
- * Stores data on the server; the key, value, and an expiration time are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param expiry when to expire the record
- * @param hashCode if not null, then the int hashcode to use
- * @return true, if the data was successfully stored
- */
- public boolean set( String key, Object value, Date expiry, Integer hashCode ) {
- return set( "set", key, value, expiry, hashCode, primitiveAsString );
- }
-
- /**
- * Adds data to the server; only the key and the value are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @return true, if the data was successfully stored
- */
- public boolean add( String key, Object value ) {
- return set( "add", key, value, null, null, primitiveAsString );
- }
-
- /**
- * Adds data to the server; the key, value, and an optional hashcode are passed in.
- *
- * @param key key to store data under
- * @param value value to store
- * @param hashCode if not null, then the int hashcode to use
- * @return true, if the data was successfully stored
- */
- public boolean add( String key, Object value, Integer hashCode ) {
- return set( "add", key, value, null, hashCode, primitiveAsString );
- }
-
- /**
- * Adds data to the server; the key, value, and an expiration time are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param expiry when to expire the record
- * @return true, if the data was successfully stored
- */
- public boolean add( String key, Object value, Date expiry ) {
- return set( "add", key, value, expiry, null, primitiveAsString );
- }
-
- /**
- * Adds data to the server; the key, value, and an expiration time are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param expiry when to expire the record
- * @param hashCode if not null, then the int hashcode to use
- * @return true, if the data was successfully stored
- */
- public boolean add( String key, Object value, Date expiry, Integer hashCode ) {
- return set( "add", key, value, expiry, hashCode, primitiveAsString );
- }
-
- /**
- * Updates data on the server; only the key and the value are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @return true, if the data was successfully stored
- */
- public boolean replace( String key, Object value ) {
- return set( "replace", key, value, null, null, primitiveAsString );
- }
-
- /**
- * Updates data on the server; only the key and the value and an optional hash are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param hashCode if not null, then the int hashcode to use
- * @return true, if the data was successfully stored
- */
- public boolean replace( String key, Object value, Integer hashCode ) {
- return set( "replace", key, value, null, hashCode, primitiveAsString );
- }
-
- /**
- * Updates data on the server; the key, value, and an expiration time are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param expiry when to expire the record
- * @return true, if the data was successfully stored
- */
- public boolean replace( String key, Object value, Date expiry ) {
- return set( "replace", key, value, expiry, null, primitiveAsString );
- }
-
- /**
- * Updates data on the server; the key, value, and an expiration time are specified.
- *
- * @param key key to store data under
- * @param value value to store
- * @param expiry when to expire the record
- * @param hashCode if not null, then the int hashcode to use
- * @return true, if the data was successfully stored
- */
- public boolean replace( String key, Object value, Date expiry, Integer hashCode ) {
- return set( "replace", key, value, expiry, hashCode, primitiveAsString );
- }
-
- /**
- * Stores data to cache.
- *
- * Whether the value is stored depends on the command: add will not store if the server
- * already has an entry for this key or the key is being deleted, and replace will not
- * store if the server has no entry for this key.
- * The server will automatically delete the value when the expiration time has been reached.
- *
- * If compression is enabled, and the data is longer than the compression threshold
- * the data will be stored in compressed form.
- *
- * As of the current release, objects that are not handled natively will be stored using java serialization.
- *
- * @param cmdname action to take (set, add, replace)
- * @param key key to store cache under
- * @param value object to cache
- * @param expiry expiration
- * @param hashCode if not null, then the int hashcode to use
- * @param asString store this object as a string?
- * @return true/false indicating success
- */
- private boolean set( String cmdname, String key, Object value, Date expiry, Integer hashCode, boolean asString ) {
-
- if ( cmdname == null || cmdname.trim().equals( "" ) || key == null ) {
- log.error( "key is null or cmd is null/empty for set()" );
- return false;
- }
-
- try {
- key = sanitizeKey( key );
- }
- catch ( UnsupportedEncodingException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, e, key );
-
- log.error( "failed to sanitize your key!", e );
- return false;
- }
-
- if ( value == null ) {
- log.error( "trying to store a null value to cache" );
- return false;
- }
-
- // get SockIO obj
- SockIOPool.SockIO sock = pool.getSock( key, hashCode );
-
- if ( sock == null ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, new IOException( "no socket to server available" ), key );
- return false;
- }
-
- if ( expiry == null )
- expiry = new Date(0);
-
- // store flags
- int flags = 0;
-
- // byte array to hold data
- byte[] val;
-
- if ( NativeHandler.isHandled( value ) ) {
-
- if ( asString ) {
- // useful for sharing data between java and non-java
- // and also for storing ints for the increment method
- try {
- if ( log.isInfoEnabled() )
- log.info( "++++ storing data as a string for key: " + key + " for class: " + value.getClass().getName() );
- val = value.toString().getBytes( defaultEncoding );
- }
- catch ( UnsupportedEncodingException ue ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, ue, key );
-
- log.error( "invalid encoding type used: " + defaultEncoding, ue );
- sock.close();
- sock = null;
- return false;
- }
- }
- else {
- try {
- if ( log.isInfoEnabled() )
- log.info( "Storing with native handler..." );
- flags |= NativeHandler.getMarkerFlag( value );
- val = NativeHandler.encode( value );
- }
- catch ( Exception e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, e, key );
-
- log.error( "Failed to native handle obj", e );
-
- sock.close();
- sock = null;
- return false;
- }
- }
- }
- else {
- // always serialize for non-primitive types
- try {
- if ( log.isInfoEnabled() )
- log.info( "++++ serializing for key: " + key + " for class: " + value.getClass().getName() );
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- (new ObjectOutputStream( bos )).writeObject( value );
- val = bos.toByteArray();
- flags |= F_SERIALIZED;
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, e, key );
-
- // if we fail to serialize, then
- // we bail
- log.error( "failed to serialize obj", e );
- log.error( value.toString() );
-
- // return socket to pool and bail
- sock.close();
- sock = null;
- return false;
- }
- }
-
- // now try to compress if we want to
- // and if the length is over the threshold
- if ( compressEnable && val.length > compressThreshold ) {
-
- try {
- if ( log.isInfoEnabled() ) {
- log.info( "++++ trying to compress data" );
- log.info( "++++ size prior to compression: " + val.length );
- }
- ByteArrayOutputStream bos = new ByteArrayOutputStream( val.length );
- GZIPOutputStream gos = new GZIPOutputStream( bos );
- gos.write( val, 0, val.length );
- gos.finish();
- gos.close();
-
- // store it and set compression flag
- val = bos.toByteArray();
- flags |= F_COMPRESSED;
-
- if ( log.isInfoEnabled() )
- log.info( "++++ compression succeeded, size after: " + val.length );
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, e, key );
-
- log.error( "IOException while compressing stream: " + e.getMessage() );
- log.error( "storing data uncompressed" );
- }
- }
-
- // now write the data to the cache server
- try {
- String cmd = String.format( "%s %s %d %d %d\r\n", cmdname, key, flags, (expiry.getTime() / 1000), val.length );
- sock.write( cmd.getBytes() );
- sock.write( val );
- sock.write( "\r\n".getBytes() );
- sock.flush();
-
- // get result code
- String line = sock.readLine();
- if ( log.isInfoEnabled() )
- log.info( "++++ memcache cmd (result code): " + cmd + " (" + line + ")" );
-
- if ( STORED.equals( line ) ) {
- if ( log.isInfoEnabled() )
- log.info("++++ data successfully stored for key: " + key );
- sock.close();
- sock = null;
- return true;
- }
- else if ( NOTSTORED.equals( line ) ) {
- if ( log.isInfoEnabled() )
- log.info( "++++ data not stored in cache for key: " + key );
- }
- else {
- log.error( "++++ error storing data in cache for key: " + key + " -- length: " + val.length );
- log.error( "++++ server response: " + line );
- }
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, e, key );
-
- // exception thrown
- log.error( "++++ exception thrown while writing bytes to server on set" );
- log.error( e.getMessage(), e );
-
- try {
- sock.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket : " + sock.toString() );
- }
-
- sock = null;
- }
-
- if ( sock != null ) {
- sock.close();
- sock = null;
- }
-
- return false;
- }
-
- /**
- * Store a counter to memcached given a key
- *
- * @param key cache key
- * @param counter number to store
- * @return true/false indicating success
- */
- public boolean storeCounter( String key, long counter ) {
- return set( "set", key, new Long( counter ), null, null, true );
- }
-
- /**
- * Store a counter to memcached given a key
- *
- * @param key cache key
- * @param counter number to store
- * @return true/false indicating success
- */
- public boolean storeCounter( String key, Long counter ) {
- return set( "set", key, counter, null, null, true );
- }
-
- /**
- * Store a counter to memcached given a key
- *
- * @param key cache key
- * @param counter number to store
- * @param hashCode if not null, then the int hashcode to use
- * @return true/false indicating success
- */
- public boolean storeCounter( String key, Long counter, Integer hashCode ) {
- return set( "set", key, counter, null, hashCode, true );
- }
-
- /**
- * Returns value in counter at given key as long.
- *
- * @param key cache key
- * @return counter value or -1 if not found
- */
- public long getCounter( String key ) {
- return getCounter( key, null );
- }
-
- /**
- * Returns value in counter at given key as long.
- *
- * @param key cache key
- * @param hashCode if not null, then the int hashcode to use
- * @return counter value or -1 if not found
- */
- public long getCounter( String key, Integer hashCode ) {
-
- if ( key == null ) {
- log.error( "null key for getCounter()" );
- return -1;
- }
-
- long counter = -1;
- try {
- counter = Long.parseLong( (String)get( key, hashCode, true ) );
- }
- catch ( Exception ex ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, ex, key );
-
- // not found or error getting out
- if ( log.isInfoEnabled() )
- log.info( String.format( "Failed to parse Long value for key: %s", key ) );
- }
-
- return counter;
- }
-
- /**
- * Thread safe way to initialize and increment a counter.
- *
- * @param key key where the data is stored
- * @return value of incrementer
- */
- public long addOrIncr( String key ) {
- return addOrIncr( key, 0, null );
- }
-
- /**
- * Thread safe way to initialize and increment a counter.
- *
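- * For example (illustrative key name), the first call stores the starting value
- * via add; if the counter already exists, the call falls through to incr:
- *
- *   long first = mc.addOrIncr("pageViews", 1); // stores 1 if the counter is absent
- *   long next  = mc.addOrIncr("pageViews", 1); // otherwise increments, e.g. to 2
- *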
- * @param key key where the data is stored
- * @param inc value to set or increment by
- * @return value of incrementer
- */
- public long addOrIncr( String key, long inc ) {
- return addOrIncr( key, inc, null );
- }
-
- /**
- * Thread safe way to initialize and increment a counter.
- *
- * @param key key where the data is stored
- * @param inc value to set or increment by
- * @param hashCode if not null, then the int hashcode to use
- * @return value of incrementer
- */
- public long addOrIncr( String key, long inc, Integer hashCode ) {
- boolean ret = set( "add", key, new Long( inc ), null, hashCode, true );
-
- if ( ret ) {
- return inc;
- }
- else {
- return incrdecr( "incr", key, inc, hashCode );
- }
- }
-
- /**
- * Thread safe way to initialize and decrement a counter.
- *
- * @param key key where the data is stored
- * @return value of incrementer
- */
- public long addOrDecr( String key ) {
- return addOrDecr( key, 0, null );
- }
-
- /**
- * Thread safe way to initialize and decrement a counter.
- *
- * @param key key where the data is stored
- * @param inc value to set or decrement by
- * @return value of incrementer
- */
- public long addOrDecr( String key, long inc ) {
- return addOrDecr( key, inc, null );
- }
-
- /**
- * Thread safe way to initialize and decrement a counter.
- *
- * @param key key where the data is stored
- * @param inc value to set or decrement by
- * @param hashCode if not null, then the int hashcode to use
- * @return value of incrementer
- */
- public long addOrDecr( String key, long inc, Integer hashCode ) {
- boolean ret = set( "add", key, new Long( inc ), null, hashCode, true );
-
- if ( ret ) {
- return inc;
- }
- else {
- return incrdecr( "decr", key, inc, hashCode );
- }
- }
-
- /**
- * Increment the value at the specified key by 1, and then return it.
- *
- * @param key key where the data is stored
- * @return -1, if the key is not found, the value after incrementing otherwise
- */
- public long incr( String key ) {
- return incrdecr( "incr", key, 1, null );
- }
-
- /**
- * Increment the value at the specified key by the passed in value.
- *
- * @param key key where the data is stored
- * @param inc how much to increment by
- * @return -1, if the key is not found, the value after incrementing otherwise
- */
- public long incr( String key, long inc ) {
- return incrdecr( "incr", key, inc, null );
- }
-
- /**
- * Increment the value at the specified key by the specified increment, and then return it.
- *
- * @param key key where the data is stored
- * @param inc how much to increment by
- * @param hashCode if not null, then the int hashcode to use
- * @return -1, if the key is not found, the value after incrementing otherwise
- */
- public long incr( String key, long inc, Integer hashCode ) {
- return incrdecr( "incr", key, inc, hashCode );
- }
-
- /**
- * Decrement the value at the specified key by 1, and then return it.
- *
- * @param key key where the data is stored
- * @return -1, if the key is not found, the value after decrementing otherwise
- */
- public long decr( String key ) {
- return incrdecr( "decr", key, 1, null );
- }
-
- /**
- * Decrement the value at the specified key by the passed in value, and then return it.
- *
- * @param key key where the data is stored
- * @param inc how much to decrement by
- * @return -1, if the key is not found, the value after decrementing otherwise
- */
- public long decr( String key, long inc ) {
- return incrdecr( "decr", key, inc, null );
- }
-
- /**
- * Decrement the value at the specified key by the specified increment, and then return it.
- *
- * @param key key where the data is stored
- * @param inc how much to decrement by
- * @param hashCode if not null, then the int hashcode to use
- * @return -1, if the key is not found, the value after incrementing otherwise
- */
- public long decr( String key, long inc, Integer hashCode ) {
- return incrdecr( "decr", key, inc, hashCode );
- }
-
- /**
- * Increments/decrements the value at the specified key by inc.
- *
- * Note that the server uses a 32-bit unsigned integer, and checks for
- * underflow. In the event of underflow, the result will be zero. Because
- * Java lacks unsigned types, the value is returned as a 64-bit integer.
- * The server will only decrement a value if it already exists;
- * if a value is not found, -1 will be returned.
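- * For example, decrementing a counter currently at 3 by 5 yields 0, not -2.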
- *
- * @param cmdname increment/decrement
- * @param key cache key
- * @param inc amount to incr or decr
- * @param hashCode if not null, then the int hashcode to use
- * @return new value or -1 if not exist
- */
- private long incrdecr( String cmdname, String key, long inc, Integer hashCode ) {
-
- if ( key == null ) {
- log.error( "null key for incrdecr()" );
- return -1;
- }
-
- try {
- key = sanitizeKey( key );
- }
- catch ( UnsupportedEncodingException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "failed to sanitize your key!", e );
- return -1;
- }
-
- // get SockIO obj for given cache key
- SockIOPool.SockIO sock = pool.getSock( key, hashCode );
-
- if ( sock == null ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnSet( this, new IOException( "no socket to server available" ), key );
- return -1;
- }
-
- try {
- String cmd = String.format( "%s %s %d\r\n", cmdname, key, inc );
- if ( log.isDebugEnabled() )
- log.debug( "++++ memcache incr/decr command: " + cmd );
-
- sock.write( cmd.getBytes() );
- sock.flush();
-
- // get result back
- String line = sock.readLine();
-
- if ( line.matches( "\\d+" ) ) {
-
- // return sock to pool and return result
- sock.close();
- sock = null;
- try {
- return Long.parseLong( line );
- }
- catch ( Exception ex ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, ex, key );
-
- log.error( String.format( "Failed to parse Long value for key: %s", key ) );
- }
- }
- else if ( NOTFOUND.equals( line ) ) {
- if ( log.isInfoEnabled() )
- log.info( "++++ key not found to incr/decr for key: " + key );
- }
- else {
- log.error( "++++ error incr/decr key: " + key );
- log.error( "++++ server response: " + line );
- }
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- // exception thrown
- log.error( "++++ exception thrown while writing bytes to server on incr/decr" );
- log.error( e.getMessage(), e );
-
- try {
- sock.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket : " + sock.toString() );
- }
-
- sock = null;
- }
-
- if ( sock != null ) {
- sock.close();
- sock = null;
- }
-
- return -1;
- }
-
- /**
- * Retrieve a key from the server.
- *
- * If the data was compressed or serialized when stored, it will automatically
- * be decompressed and/or deserialized, as appropriate.
- *
- * Non-serialized data will be returned as a string, so explicit conversion to
- * numeric types will be necessary, if desired.
- *
- * @param key key where data is stored
- * @return the object that was previously stored, or null if it was not previously stored
- */
- public Object get( String key ) {
- return get( key, null, false );
- }
-
- /**
- * Retrieve a key from the server, using a specific hash.
- *
- * If the data was compressed or serialized when stored, it will automatically
- * be decompressed and/or deserialized, as appropriate.
- *
- * Non-serialized data will be returned as a string, so explicit conversion to
- * numeric types will be necessary, if desired.
- *
- * @param key key where data is stored
- * @param hashCode if not null, then the int hashcode to use
- * @return the object that was previously stored, or null if it was not previously stored
- */
- public Object get( String key, Integer hashCode ) {
- return get( key, hashCode, false );
- }
-
- /**
- * Retrieve a key from the server, using a specific hash.
- *
- * If the data was compressed or serialized when stored, it will automatically
- * be decompressed and/or deserialized, as appropriate.
- *
- * Non-serialized data will be returned as a string, so explicit conversion to
- * numeric types will be necessary, if desired.
- *
- * @param key key where data is stored
- * @param hashCode if not null, then the int hashcode to use
- * @param asString if true, then return string val
- * @return the object that was previously stored, or null if it was not previously stored
- */
- public Object get( String key, Integer hashCode, boolean asString ) {
-
- if ( key == null ) {
- log.error( "key is null for get()" );
- return null;
- }
-
- try {
- key = sanitizeKey( key );
- }
- catch ( UnsupportedEncodingException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "failed to sanitize your key!", e );
- return null;
- }
-
- // get SockIO obj using cache key
- SockIOPool.SockIO sock = pool.getSock( key, hashCode );
-
- if ( sock == null ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, new IOException( "no socket to server available" ), key );
- return null;
- }
-
- try {
- String cmd = "get " + key + "\r\n";
-
- if ( log.isDebugEnabled() )
- log.debug("++++ memcache get command: " + cmd);
-
- sock.write( cmd.getBytes() );
- sock.flush();
-
- // ready object
- Object o = null;
-
- while ( true ) {
- String line = sock.readLine();
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ line: " + line );
-
- if ( line.startsWith( VALUE ) ) {
- String[] info = line.split(" ");
- int flag = Integer.parseInt( info[2] );
- int length = Integer.parseInt( info[3] );
-
- if ( log.isDebugEnabled() ) {
- log.debug( "++++ key: " + key );
- log.debug( "++++ flags: " + flag );
- log.debug( "++++ length: " + length );
- }
-
- // read obj into buffer
- byte[] buf = new byte[length];
- sock.read( buf );
- sock.clearEOL();
-
- if ( (flag & F_COMPRESSED) == F_COMPRESSED ) {
- try {
- // read the input stream, and write to a byte array output stream since
- // we have to read into a byte array, but we don't know how large it
- // will need to be, and we don't want to resize it a bunch
- GZIPInputStream gzi = new GZIPInputStream( new ByteArrayInputStream( buf ) );
- ByteArrayOutputStream bos = new ByteArrayOutputStream( buf.length );
-
- int count;
- byte[] tmp = new byte[2048];
- while ( (count = gzi.read(tmp)) != -1 ) {
- bos.write( tmp, 0, count );
- }
-
- // store uncompressed back to buffer
- buf = bos.toByteArray();
- gzi.close();
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "++++ IOException thrown while trying to uncompress input stream for key: " + key + " -- " + e.getMessage() );
- throw new NestedIOException( "++++ IOException thrown while trying to uncompress input stream for key: " + key, e );
- }
- }
-
- // we can only take out serialized objects
- if ( ( flag & F_SERIALIZED ) != F_SERIALIZED ) {
- if ( primitiveAsString || asString ) {
- // pulling out string value
- if ( log.isInfoEnabled() )
- log.info( "++++ retrieving object and stuffing into a string." );
- o = new String( buf, defaultEncoding );
- }
- else {
- // decoding object
- try {
- o = NativeHandler.decode( buf, flag );
- }
- catch ( Exception e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "++++ Exception thrown while trying to deserialize for key: " + key, e );
- throw new NestedIOException( e );
- }
- }
- }
- else {
- // deserialize if the data is serialized
- ContextObjectInputStream ois =
- new ContextObjectInputStream( new ByteArrayInputStream( buf ), classLoader );
- try {
- o = ois.readObject();
- if ( log.isInfoEnabled() )
- log.info( "++++ deserializing " + o.getClass() );
- }
- catch ( Exception e ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- o = null;
- log.error( "++++ Exception thrown while trying to deserialize for key: " + key + " -- " + e.getMessage() );
- }
- }
- }
- else if ( END.equals( line ) ) {
- if ( log.isDebugEnabled() )
- log.debug( "++++ finished reading from cache server" );
- break;
- }
- }
-
- sock.close();
- sock = null;
- return o;
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- // exception thrown
- log.error( "++++ exception thrown while trying to get object from cache for key: " + key + " -- " + e.getMessage() );
-
- try {
- sock.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket : " + sock.toString() );
- }
- sock = null;
- }
-
- if ( sock != null )
- sock.close();
-
- return null;
- }
-
- /**
- * Retrieve multiple objects from the memcache.
- *
- * This is recommended over repeated calls to {@link #get(String) get()}, since it
- * is more efficient.
- *
- * @param keys String array of keys to retrieve
- * @return Object array ordered in same order as key array containing results
- */
- public Object[] getMultiArray( String[] keys ) {
- return getMultiArray( keys, null, false );
- }
-
- /**
- * Retrieve multiple objects from the memcache.
- *
- * This is recommended over repeated calls to {@link #get(String) get()}, since it
- * is more efficient.
- *
- * @param keys String array of keys to retrieve
- * @param hashCodes if not null, then the Integer array of hashCodes
- * @return Object array ordered in same order as key array containing results
- */
- public Object[] getMultiArray( String[] keys, Integer[] hashCodes ) {
- return getMultiArray( keys, hashCodes, false );
- }
-
- /**
- * Retrieve multiple objects from the memcache.
- *
- * This is recommended over repeated calls to {@link #get(String) get()}, since it
- * is more efficient.
- *
- * @param keys String array of keys to retrieve
- * @param hashCodes if not null, then the Integer array of hashCodes
- * @param asString if true, retrieve string vals
- * @return Object array ordered in same order as key array containing results
- */
- public Object[] getMultiArray( String[] keys, Integer[] hashCodes, boolean asString ) {
-
- Map data = getMulti( keys, hashCodes, asString );
-
- if ( data == null )
- return null;
-
- Object[] res = new Object[ keys.length ];
- for ( int i = 0; i < keys.length; i++ ) {
- res[i] = data.get( keys[i] );
- }
-
- return res;
- }
-
- /**
- * Retrieve multiple objects from the memcache.
- *
- * This is recommended over repeated calls to {@link #get(String) get()}, since it
- * is more efficient.
- *
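- * For example (illustrative keys):
- *
- *   Map values = mc.getMulti(new String[] { "key1", "key2", "missingKey" });
- *   Object v1 = values.get("key1");       // stored value
- *   Object v3 = values.get("missingKey"); // null if the server had no entry
- *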
- * @param keys String array of keys to retrieve
- * @return a hashmap with an entry for each key found by the server;
- * keys that are not found are mapped to null, so attempting to
- * retrieve them from the hashmap gives you null.
- */
- public Map getMulti( String[] keys ) {
- return getMulti( keys, null, false );
- }
-
- /**
- * Retrieve multiple keys from the memcache.
- *
- * This is recommended over repeated calls to {@link #get(String) get()}, since it
- * is more efficient.
- *
- * @param keys keys to retrieve
- * @param hashCodes if not null, then the Integer array of hashCodes
- * @return a hashmap with an entry for each key found by the server;
- * keys that are not found are mapped to null, so attempting to
- * retrieve them from the hashmap gives you null.
- */
- public Map getMulti( String[] keys, Integer[] hashCodes ) {
- return getMulti( keys, hashCodes, false );
- }
-
- /**
- * Retrieve multiple keys from the memcache.
- *
- * This is recommended over repeated calls to {@link #get(String) get()}, since it
- * is more efficient.
- *
- * @param keys keys to retrieve
- * @param hashCodes if not null, then the Integer array of hashCodes
- * @param asString if true then retrieve using String val
- * @return a hashmap with an entry for each key found by the server;
- * keys that are not found are mapped to null, so attempting to
- * retrieve them from the hashmap gives you null.
- */
- public Map getMulti( String[] keys, Integer[] hashCodes, boolean asString ) {
-
- if ( keys == null || keys.length == 0 ) {
- log.error( "missing keys for getMulti()" );
- return null;
- }
-
- Map<String,StringBuilder> cmdMap = new HashMap<String,StringBuilder>();
-
- for ( int i = 0; i < keys.length; ++i ) {
-
- String key = keys[i];
- if ( key == null ) {
- log.error( "null key, so skipping" );
- continue;
- }
-
- Integer hash = null;
- if ( hashCodes != null && hashCodes.length > i )
- hash = hashCodes[ i ];
-
- String cleanKey = key;
- try {
- cleanKey = sanitizeKey( key );
- }
- catch ( UnsupportedEncodingException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "failed to sanitize your key!", e );
- continue;
- }
-
- // get SockIO obj from cache key
- SockIOPool.SockIO sock = pool.getSock( cleanKey, hash );
-
- if ( sock == null ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, new IOException( "no socket to server available" ), key );
- continue;
- }
-
- // start a get command for this host if not already present
- if ( !cmdMap.containsKey( sock.getHost() ) )
- cmdMap.put( sock.getHost(), new StringBuilder( "get" ) );
-
- cmdMap.get( sock.getHost() ).append( " " + cleanKey );
-
- // return to pool
- sock.close();
- }
-
- if ( log.isInfoEnabled() )
- log.info( "multi get socket count : " + cmdMap.size() );
-
- // now query memcache
- Map<String,Object> ret = new HashMap<String,Object>( keys.length );
-
- // now use new NIO implementation
- (new NIOLoader( this )).doMulti( asString, cmdMap, keys, ret );
-
- // fix the return array in case we had to rewrite any of the keys
- for ( String key : keys ) {
-
- String cleanKey = key;
- try {
- cleanKey = sanitizeKey( key );
- }
- catch ( UnsupportedEncodingException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "failed to sanitize your key!", e );
- continue;
- }
-
- if ( ! key.equals( cleanKey ) && ret.containsKey( cleanKey ) ) {
- ret.put( key, ret.get( cleanKey ) );
- ret.remove( cleanKey );
- }
-
- // backfill missing keys w/ null value
- if ( ! ret.containsKey( key ) )
- ret.put( key, null );
- }
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ memcache: got back " + ret.size() + " results" );
- return ret;
- }
-
- /**
- * This method loads the data from cache into a Map.
- *
- * Pass a LineInputStream which is ready to be read from and a HashMap
- * to store the results.
- *
- * @param input line input stream from which to read the data
- * @param hm hashmap to store data into
- * @param asString if true, and if we are using NativeHandler, return string val
- * @throws IOException if io exception happens while reading from socket
- */
- private void loadMulti( LineInputStream input, Map hm, boolean asString ) throws IOException {
-
- while ( true ) {
- String line = input.readLine();
- if ( log.isDebugEnabled() )
- log.debug( "++++ line: " + line );
-
- if ( line.startsWith( VALUE ) ) {
- String[] info = line.split(" ");
- String key = info[1];
- int flag = Integer.parseInt( info[2] );
- int length = Integer.parseInt( info[3] );
-
- if ( log.isDebugEnabled() ) {
- log.debug( "++++ key: " + key );
- log.debug( "++++ flags: " + flag );
- log.debug( "++++ length: " + length );
- }
-
- // read obj into buffer
- byte[] buf = new byte[length];
- input.read( buf );
- input.clearEOL();
-
- // ready object
- Object o;
-
- // check for compression
- if ( (flag & F_COMPRESSED) == F_COMPRESSED ) {
- try {
- // read the input stream, and write to a byte array output stream since
- // we have to read into a byte array, but we don't know how large it
- // will need to be, and we don't want to resize it a bunch
- GZIPInputStream gzi = new GZIPInputStream( new ByteArrayInputStream( buf ) );
- ByteArrayOutputStream bos = new ByteArrayOutputStream( buf.length );
-
- int count;
- byte[] tmp = new byte[2048];
- while ( (count = gzi.read(tmp)) != -1 ) {
- bos.write( tmp, 0, count );
- }
-
- // store uncompressed back to buffer
- buf = bos.toByteArray();
- gzi.close();
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "++++ IOException thrown while trying to uncompress input stream for key: " + key + " -- " + e.getMessage() );
- throw new NestedIOException( "++++ IOException thrown while trying to uncompress input stream for key: " + key, e );
- }
- }
-
- // we can only take out serialized objects
- if ( ( flag & F_SERIALIZED ) != F_SERIALIZED ) {
- if ( primitiveAsString || asString ) {
- // pulling out string value
- if ( log.isInfoEnabled() )
- log.info( "++++ retrieving object and stuffing into a string." );
- o = new String( buf, defaultEncoding );
- }
- else {
- // decoding object
- try {
- o = NativeHandler.decode( buf, flag );
- }
- catch ( Exception e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- log.error( "++++ Exception thrown while trying to deserialize for key: " + key + " -- " + e.getMessage() );
- throw new NestedIOException( e );
- }
- }
- }
- else {
- // deserialize if the data is serialized
- ContextObjectInputStream ois =
- new ContextObjectInputStream( new ByteArrayInputStream( buf ), classLoader );
- try {
- o = ois.readObject();
- if ( log.isInfoEnabled() )
- log.info( "++++ deserializing " + o.getClass() );
- }
- catch ( InvalidClassException e ) {
- /* Errors de-serializing are to be expected in the case of a
- * long running server that spans client restarts with updated
- * classes.
- */
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- o = null;
- log.error( "++++ InvalidClassException thrown while trying to deserialize for key: " + key + " -- " + e.getMessage() );
- }
- catch ( ClassNotFoundException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this, e, key );
-
- o = null;
- log.error( "++++ ClassNotFoundException thrown while trying to deserialize for key: " + key + " -- " + e.getMessage() );
- }
- }
-
- // store the object into the cache
- if ( o != null )
- hm.put( key, o );
- }
- else if ( END.equals( line ) ) {
- if ( log.isDebugEnabled() )
- log.debug( "++++ finished reading from cache server" );
- break;
- }
- }
- }
-
- private String sanitizeKey( String key ) throws UnsupportedEncodingException {
- return ( sanitizeKeys ) ? URLEncoder.encode( key, "UTF-8" ) : key;
- }
-
- /**
- * Invalidates the entire cache.
- *
- * Will return true only if it succeeds in clearing all servers.
- *
- * @return success true/false
- */
- public boolean flushAll() {
- return flushAll( null );
- }
-
- /**
- * Invalidates the entire cache.
- *
- * Will return true only if it succeeds in clearing all servers.
- * If null is passed in, then all servers will be flushed.
- *
- * @param servers optional array of host(s) to flush (host:port)
- * @return success true/false
- */
- public boolean flushAll( String[] servers ) {
-
- // get SockIOPool instance
- // return false if unable to get SockIO obj
- if ( pool == null ) {
- log.error( "++++ unable to get SockIOPool instance" );
- return false;
- }
-
- // get all servers and iterate over them
- servers = ( servers == null )
- ? pool.getServers()
- : servers;
-
- // if no servers, then return early
- if ( servers == null || servers.length <= 0 ) {
- log.error( "++++ no servers to flush" );
- return false;
- }
-
- boolean success = true;
-
- for ( int i = 0; i < servers.length; i++ ) {
-
- SockIOPool.SockIO sock = pool.getConnection( servers[i] );
- if ( sock == null ) {
- log.error( "++++ unable to get connection to : " + servers[i] );
- success = false;
- if ( errorHandler != null )
- errorHandler.handleErrorOnFlush( this, new IOException( "no socket to server available" ) );
- continue;
- }
-
- // build command
- String command = "flush_all\r\n";
-
- try {
- sock.write( command.getBytes() );
- sock.flush();
-
- // if we get appropriate response back, then we return true
- String line = sock.readLine();
- success = success && OK.equals( line );
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnFlush( this, e );
-
- // exception thrown
- log.error( "++++ exception thrown while writing bytes to server on flushAll" );
- log.error( e.getMessage(), e );
-
- try {
- sock.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket : " + sock.toString() );
- }
-
- success = false;
- sock = null;
- }
-
- if ( sock != null ) {
- sock.close();
- sock = null;
- }
- }
-
- return success;
- }
-
- /**
- * Retrieves stats for all servers.
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains stats
- * with stat name as key and value as value.
- *
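- * For example (server address is illustrative):
- *
- *   Map stats = mc.stats();
- *   Map serverStats = (Map) stats.get("127.0.0.1:11211");
- *   String uptime = (String) serverStats.get("uptime");
- *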
- * @return Stats map
- */
- public Map stats() {
- return stats( null );
- }
-
- /**
- * Retrieves stats for passed in servers (or all servers).
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains stats
- * with stat name as key and value as value.
- *
- * @param servers string array of servers to retrieve stats from, or all if this is null
- * @return Stats map
- */
- public Map stats( String[] servers ) {
- return stats( servers, "stats\r\n", STATS );
- }
-
- /**
- * Retrieves stats items for all servers.
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains item stats
- * with itemname:number:field as key and value as value.
- *
- * @return Stats map
- */
- public Map statsItems() {
- return statsItems( null );
- }
-
- /**
- * Retrieves stats for passed in servers (or all servers).
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains item stats
- * with itemname:number:field as key and value as value.
- *
- * @param servers string array of servers to retrieve stats from, or all if this is null
- * @return Stats map
- */
- public Map statsItems( String[] servers ) {
- return stats( servers, "stats items\r\n", STATS );
- }
-
- /**
- * Retrieves stats items for all servers.
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains slabs stats
- * with slabnumber:field as key and value as value.
- *
- * @return Stats map
- */
- public Map statsSlabs() {
- return statsSlabs( null );
- }
-
- /**
- * Retrieves stats for passed in servers (or all servers).
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains slabs stats
- * with slabnumber:field as key and value as value.
- *
- * @param servers string array of servers to retrieve stats from, or all if this is null
- * @return Stats map
- */
- public Map statsSlabs( String[] servers ) {
- return stats( servers, "stats slabs\r\n", STATS );
- }
-
- /**
- * Retrieves items cachedump for all servers.
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains cachedump stats
- * with the cachekey as key and byte size and unix timestamp as value.
- *
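- * For example (slab number and limit are illustrative):
- *
- *   Map dump = mc.statsCacheDump(1, 100); // up to 100 items from slab 1, per server
- *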
- * @param slabNumber the slab number to dump items from
- * @param limit the maximum number of items to dump
- * @return Stats map
- */
- public Map statsCacheDump( int slabNumber, int limit ) {
- return statsCacheDump( null, slabNumber, limit );
- }
-
- /**
- * Retrieves stats for passed in servers (or all servers).
- *
- * Returns a map keyed on the servername.
- * The value is another map which contains cachedump stats
- * with the cachekey as key and byte size and unix timestamp as value.
- *
- * @param servers string array of servers to retrieve stats from, or all if this is null
- * @param slabNumber the slab number to dump items from
- * @param limit the maximum number of items to dump
- * @return Stats map
- */
- public Map statsCacheDump( String[] servers, int slabNumber, int limit ) {
- return stats( servers, String.format( "stats cachedump %d %d\r\n", slabNumber, limit ), ITEM );
- }
-
- private Map stats( String[] servers, String command, String lineStart ) {
-
- if ( command == null || command.trim().equals( "" ) ) {
- log.error( "++++ invalid / missing command for stats()" );
- return null;
- }
-
- // get all servers and iterate over them
- servers = (servers == null)
- ? pool.getServers()
- : servers;
-
- // if no servers, then return early
- if ( servers == null || servers.length <= 0 ) {
- log.error( "++++ no servers to check stats" );
- return null;
- }
-
- // array of stats Maps
- Map<String,Map<String,String>> statsMaps =
- new HashMap<String,Map<String,String>>();
-
- for ( int i = 0; i < servers.length; i++ ) {
-
- SockIOPool.SockIO sock = pool.getConnection( servers[i] );
- if ( sock == null ) {
- log.error( "++++ unable to get connection to : " + servers[i] );
- if ( errorHandler != null )
- errorHandler.handleErrorOnStats( this, new IOException( "no socket to server available" ) );
- continue;
- }
-
- // build command
- try {
- sock.write( command.getBytes() );
- sock.flush();
-
- // map to hold key value pairs
- Map<String,String> stats = new HashMap<String,String>();
-
- // loop over results
- while ( true ) {
- String line = sock.readLine();
- if ( log.isDebugEnabled() )
- log.debug( "++++ line: " + line );
-
- if ( line.startsWith( lineStart ) ) {
- String[] info = line.split( " ", 3 );
- String key = info[1];
- String value = info[2];
-
- if ( log.isDebugEnabled() ) {
- log.debug( "++++ key : " + key );
- log.debug( "++++ value: " + value );
- }
-
- stats.put( key, value );
- }
- else if ( END.equals( line ) ) {
- // finish when we get end from server
- if ( log.isDebugEnabled() )
- log.debug( "++++ finished reading from cache server" );
- break;
- }
- else if ( line.startsWith( ERROR ) || line.startsWith( CLIENT_ERROR ) || line.startsWith( SERVER_ERROR ) ) {
- log.error( "++++ failed to query stats" );
- log.error( "++++ server response: " + line );
- break;
- }
-
- statsMaps.put( servers[i], stats );
- }
- }
- catch ( IOException e ) {
-
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnStats( this, e );
-
- // exception thrown
- log.error( "++++ exception thrown while writing bytes to server on stats" );
- log.error( e.getMessage(), e );
-
- try {
- sock.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket : " + sock.toString() );
- }
-
- sock = null;
- }
-
- if ( sock != null ) {
- sock.close();
- sock = null;
- }
- }
-
- return statsMaps;
- }
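All of the public stats calls above funnel into this one helper: each sends a different stats command and builds a map keyed on server name whose value is a map of stat name to stat value. A minimal usage sketch follows; the class name is hypothetical, the pool is assumed to have been initialized elsewhere, and the exact stat names ("uptime", etc.) depend on the memcached server version.

    import java.util.Map;

    import com.meetup.memcached.MemcachedClient;

    public class StatsUsageSketch {
        public static void main( String[] args ) {
            // assumes SockIOPool.getInstance().setServers(...) / initialize() already ran
            MemcachedClient mc = new MemcachedClient();

            // server -> ( stat name -> stat value )
            Map stats = mc.stats();
            for ( Object server : stats.keySet() ) {
                Map perServer = (Map) stats.get( server );
                System.out.println( server + " uptime=" + perServer.get( "uptime" ) );
            }

            // slab-level counters, plus up to 10 keys dumped from slab 1
            System.out.println( mc.statsSlabs() );
            System.out.println( mc.statsCacheDump( 1, 10 ) );
        }
    }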
-
- protected final class NIOLoader {
- protected Selector selector;
- protected int numConns = 0;
- protected MemcachedClient mc;
- protected Connection[] conns;
-
- public NIOLoader( MemcachedClient mc ) {
- this.mc = mc;
- }
-
- private final class Connection {
-
- public List<ByteBuffer> incoming = new ArrayList<ByteBuffer>();
- public ByteBuffer outgoing;
- public SockIOPool.SockIO sock;
- public SocketChannel channel;
- private boolean isDone = false;
-
- public Connection( SockIOPool.SockIO sock, StringBuilder request ) throws IOException {
- if ( log.isDebugEnabled() )
- log.debug( "setting up connection to "+sock.getHost() );
-
- this.sock = sock;
- outgoing = ByteBuffer.wrap( request.append( "\r\n" ).toString().getBytes() );
-
- channel = sock.getChannel();
- if ( channel == null )
- throw new IOException( "dead connection to: " + sock.getHost() );
-
- channel.configureBlocking( false );
- channel.register( selector, SelectionKey.OP_WRITE, this );
- }
-
- public void close() {
- try {
- if ( isDone ) {
- // turn off non-blocking IO and return to pool
- if ( log.isDebugEnabled() )
- log.debug( "++++ gracefully closing connection to "+sock.getHost() );
-
- channel.configureBlocking( true );
- sock.close();
- return;
- }
- }
- catch ( IOException e ) {
- log.warn( "++++ memcache: unexpected error closing normally" );
- }
-
- try {
- if ( log.isDebugEnabled() )
- log.debug("forcefully closing connection to "+sock.getHost());
-
- channel.close();
- sock.trueClose();
- }
- catch ( IOException ignoreMe ) { }
- }
-
- public boolean isDone() {
- // if we know we're done, just say so
- if ( isDone )
- return true;
-
- // else find out the hard way
- int strPos = B_END.length-1;
-
- int bi = incoming.size() - 1;
- while ( bi >= 0 && strPos >= 0 ) {
- ByteBuffer buf = incoming.get( bi );
- int pos = buf.position()-1;
- while ( pos >= 0 && strPos >= 0 ) {
- if ( buf.get( pos-- ) != B_END[strPos--] )
- return false;
- }
-
- bi--;
- }
-
- isDone = strPos < 0;
- return isDone;
- }
-
- public ByteBuffer getBuffer() {
- int last = incoming.size()-1;
- if ( last >= 0 && incoming.get( last ).hasRemaining() ) {
- return incoming.get( last );
- }
- else {
- ByteBuffer newBuf = ByteBuffer.allocate( 8192 );
- incoming.add( newBuf );
- return newBuf;
- }
- }
-
- public String toString() {
- return "Connection to " + sock.getHost() + " with " + incoming.size() + " bufs; done is " + isDone;
- }
- }
-
- public void doMulti( boolean asString, Map<String,StringBuilder> sockKeys, String[] keys, Map<String,Object> ret ) {
-
- long timeRemaining = 0;
- try {
- selector = Selector.open();
-
- // get the sockets, flip them to non-blocking, and set up data
- // structures
- conns = new Connection[sockKeys.keySet().size()];
- numConns = 0;
- for ( Iterator<String> i = sockKeys.keySet().iterator(); i.hasNext(); ) {
- // get SockIO obj from hostname
- String host = i.next();
-
- SockIOPool.SockIO sock = pool.getConnection( host );
-
- if ( sock == null ) {
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( this.mc, new IOException( "no socket to server available" ), keys );
- return;
- }
-
- conns[numConns++] = new Connection( sock, sockKeys.get( host ) );
- }
-
- // the main select loop; ends when
- // 1) we've received data from all the servers, or
- // 2) we time out
- long startTime = System.currentTimeMillis();
-
- long timeout = pool.getMaxBusy();
- timeRemaining = timeout;
-
- while ( numConns > 0 && timeRemaining > 0 ) {
- int n = selector.select( Math.min( timeout, 5000 ) );
- if ( n > 0 ) {
- // we've got some activity; handle it
- Iterator<SelectionKey> it = selector.selectedKeys().iterator();
- while ( it.hasNext() ) {
- SelectionKey key = it.next();
- it.remove();
- handleKey( key );
- }
- }
- else {
- // timeout likely... better check
- // TODO: This seems like a problem area that we need to figure out how to handle.
- log.error( "selector timed out waiting for activity" );
- }
-
- timeRemaining = timeout - (System.currentTimeMillis() - startTime);
- }
- }
- catch ( IOException e ) {
- // errors can happen just about anywhere above, from
- // connection setup to any of the mechanics
- handleError( e, keys );
- return;
- }
- finally {
- if ( log.isDebugEnabled() )
- log.debug( "Disconnecting; numConns=" + numConns + " timeRemaining=" + timeRemaining );
-
- // run through our conns and either return them to the pool
- // or forcibly close them
- try {
- if ( selector != null )
- selector.close();
- }
- catch ( IOException ignoreMe ) { }
-
- for ( Connection c : conns ) {
- if ( c != null )
- c.close();
- }
- }
-
- // Done! Build the list of results and return them. If we get
- // here by a timeout, then some of the connections are probably
- // not done. But we'll return what we've got...
- for ( Connection c : conns ) {
- try {
- if ( c.incoming.size() > 0 && c.isDone() )
- loadMulti( new ByteBufArrayInputStream( c.incoming ), ret, asString );
- }
- catch ( Exception e ) {
- // shouldn't happen; we have all the data already
- log.warn( "Caught the aforementioned exception on "+c );
- }
- }
- }
-
- private void handleError( Throwable e, String[] keys ) {
- // if we have an errorHandler, use its hook
- if ( errorHandler != null )
- errorHandler.handleErrorOnGet( MemcachedClient.this, e, keys );
-
- // exception thrown
- log.error( "++++ exception thrown while getting from cache on getMulti" );
- log.error( e.getMessage() );
- }
-
- private void handleKey( SelectionKey key ) throws IOException {
- if ( log.isDebugEnabled() )
- log.debug( "handling selector op " + key.readyOps() + " for key " + key );
-
- if ( key.isReadable() )
- readResponse( key );
- else if ( key.isWritable() )
- writeRequest( key );
- }
-
- public void writeRequest( SelectionKey key ) throws IOException {
- ByteBuffer buf = ((Connection) key.attachment()).outgoing;
- SocketChannel sc = (SocketChannel)key.channel();
-
- if ( buf.hasRemaining() ) {
- if ( log.isDebugEnabled() )
- log.debug( "writing " + buf.remaining() + "B to " + ((SocketChannel) key.channel()).socket().getInetAddress() );
-
- sc.write( buf );
- }
-
- if ( !buf.hasRemaining() ) {
- if ( log.isDebugEnabled() )
- log.debug( "switching to read mode for server " + ((SocketChannel)key.channel()).socket().getInetAddress() );
-
- key.interestOps( SelectionKey.OP_READ );
- }
- }
-
- public void readResponse( SelectionKey key ) throws IOException {
- Connection conn = (Connection)key.attachment();
- ByteBuffer buf = conn.getBuffer();
- int count = conn.channel.read( buf );
- if ( count > 0 ) {
- if ( log.isDebugEnabled() )
- log.debug( "read " + count + " from " + conn.channel.socket().getInetAddress() );
-
- if ( conn.isDone() ) {
- if ( log.isDebugEnabled() )
- log.debug( "connection done to " + conn.channel.socket().getInetAddress() );
-
- key.cancel();
- numConns--;
- return;
- }
- }
- }
- }
-}
diff --git a/src/com/meetup/memcached/NativeHandler.java b/src/com/meetup/memcached/NativeHandler.java
deleted file mode 100644
index 7f9a87e..0000000
--- a/src/com/meetup/memcached/NativeHandler.java
+++ /dev/null
@@ -1,443 +0,0 @@
-/**
- * Copyright (c) 2008 Greg Whalin
- * All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the BSD license
- *
- * This library is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.
- *
- * You should have received a copy of the BSD License along with this
- * library.
- *
- * @author Greg Whalin
- */
-package com.meetup.memcached;
-
-import java.util.Date;
-import org.apache.log4j.Logger;
-
-/**
- * Handle encoding standard Java types directly which can result in significant
- * memory savings:
- *
- * Currently the Memcached driver for Java supports the setSerialize() option.
- * This can increase performance in some situations but has a few issues:
- *
- * Code that performs class casting will throw ClassCastExceptions when
- * setSerialize is disabled. For example:
- *
- * mc.set( "foo", new Integer( 1 ) ); Integer output = (Integer)mc.get("foo");
- *
- * Will work just fine when setSerialize is true, but when it's false it will just throw
- * a ClassCastException.
- *
- * Also, internally it doesn't support Boolean, and since toString is called it wastes a
- * lot of memory and causes additional performance issues. For example, an Integer stored
- * as a string can take anywhere from 1 byte to 10 bytes.
- *
- * Due to the way the memcached slab allocator works it seems like a LOT of wasted
- * memory to store primitive types as serialized objects (from a performance and
- * memory perspective). In our applications we have millions of small objects and
- * wasted memory would become a big problem.
- *
- * For example a Serialized Boolean takes 47 bytes which means it will fit into the
- * 64byte LRU. Using 1 byte means it will fit into the 8 byte LRU thus saving 8x
- * the memory. This also saves the CPU performance since we don't have to
- * serialize bytes back and forth and we can compute the byte[] value directly.
- *
- * One problem would be when the user calls get() because doing so would require
- * the app to know the type of the object stored as a bytearray inside memcached
- * (since the user will probably cast).
- *
- * If we assume the basic types are interned we could use the first byte as the
- * type with the remaining bytes as the value. Then on get() we could read the
- * first byte to determine the type and then construct the correct object for it.
- * This would prevent the ClassCastException I talked about above.
- *
- * We could remove the setSerialize() option and just assume that standard VM types
- * are always interned in this manner.
- *
- * mc.set( "foo", new Boolean.TRUE ); Boolean b = (Boolean)mc.get( "foo" );
- *
- * And the type casts would work because internally we would create a new Boolean
- * to return back to the client.
- *
- * This would reduce memory footprint and allow for a virtual implementation of the
- * Externalizable interface which is much faster than Serialization.
- *
- * Currently the memory improvements would be:
- *
- * java.lang.Boolean - 8x memory improvement (now just two bytes)
- * java.lang.Integer - 16x memory improvement (now just 5 bytes)
- *
- * Most of the other primitive types would benefit from this optimization.
- * java.lang.Character being another obvious example.
- *
- * I know it seems like I'm being really picky here but for our application I'd
- * save 1G of memory right off the bat. We'd go from 1.152G of memory used
- * down to 144M, which is much better IMO.
- *
- * http://java.sun.com/docs/books/tutorial/native1.1/integrating/types.html
- *
- * @author Kevin A. Burton
- * @author Greg Whalin
- */
-public class NativeHandler {
-
- // logger
- private static Logger log =
- Logger.getLogger( NativeHandler.class.getName() );
-
- /**
- * Determine if an object can be natively serialized by this class.
- *
- * @param value Object to test.
- * @return true/false
- */
- public static boolean isHandled( Object value ) {
-
- return (
- value instanceof Byte ||
- value instanceof Boolean ||
- value instanceof Integer ||
- value instanceof Long ||
- value instanceof Character ||
- value instanceof String ||
- value instanceof StringBuffer ||
- value instanceof Float ||
- value instanceof Short ||
- value instanceof Double ||
- value instanceof Date ||
- value instanceof StringBuilder ||
- value instanceof byte[]
- )
- ? true
- : false;
- }
-
- /**
- * Returns the flag for marking the type of the byte array.
- *
- * @param value Object we are storing.
- * @return int marker
- */
- public static int getMarkerFlag( Object value ) {
-
- if ( value instanceof Byte )
- return MemcachedClient.MARKER_BYTE;
-
- if ( value instanceof Boolean )
- return MemcachedClient.MARKER_BOOLEAN;
-
- if ( value instanceof Integer )
- return MemcachedClient.MARKER_INTEGER;
-
- if ( value instanceof Long )
- return MemcachedClient.MARKER_LONG;
-
- if ( value instanceof Character )
- return MemcachedClient.MARKER_CHARACTER;
-
- if ( value instanceof String )
- return MemcachedClient.MARKER_STRING;
-
- if ( value instanceof StringBuffer )
- return MemcachedClient.MARKER_STRINGBUFFER;
-
- if ( value instanceof Float )
- return MemcachedClient.MARKER_FLOAT;
-
- if ( value instanceof Short )
- return MemcachedClient.MARKER_SHORT;
-
- if ( value instanceof Double )
- return MemcachedClient.MARKER_DOUBLE;
-
- if ( value instanceof Date )
- return MemcachedClient.MARKER_DATE;
-
- if ( value instanceof StringBuilder )
- return MemcachedClient.MARKER_STRINGBUILDER;
-
- if ( value instanceof byte[] )
- return MemcachedClient.MARKER_BYTEARR;
-
- return -1;
- }
-
- /**
- * Encodes supported types
- *
- * @param value Object to encode.
- * @return byte array
- *
- * @throws Exception If fail to encode.
- */
- public static byte[] encode( Object value ) throws Exception {
-
- if ( value instanceof Byte )
- return encode( (Byte)value );
-
- if ( value instanceof Boolean )
- return encode( (Boolean)value );
-
- if ( value instanceof Integer )
- return encode( ((Integer)value).intValue() );
-
- if ( value instanceof Long )
- return encode( ((Long)value).longValue() );
-
- if ( value instanceof Character )
- return encode( (Character)value );
-
- if ( value instanceof String )
- return encode( (String)value );
-
- if ( value instanceof StringBuffer )
- return encode( (StringBuffer)value );
-
- if ( value instanceof Float )
- return encode( ((Float)value).floatValue() );
-
- if ( value instanceof Short )
- return encode( (Short)value );
-
- if ( value instanceof Double )
- return encode( ((Double)value).doubleValue() );
-
- if ( value instanceof Date )
- return encode( (Date)value);
-
- if ( value instanceof StringBuilder )
- return encode( (StringBuilder)value );
-
- if ( value instanceof byte[] )
- return encode( (byte[])value );
-
- return null;
- }
-
- protected static byte[] encode( Byte value ) {
- byte[] b = new byte[1];
- b[0] = value.byteValue();
- return b;
- }
-
- protected static byte[] encode( Boolean value ) {
- byte[] b = new byte[1];
-
- if ( value.booleanValue() )
- b[0] = 1;
- else
- b[0] = 0;
-
- return b;
- }
-
- protected static byte[] encode( int value ) {
- return getBytes( value );
- }
-
- protected static byte[] encode( long value ) throws Exception {
- return getBytes( value );
- }
-
- protected static byte[] encode( Date value ) {
- return getBytes( value.getTime() );
- }
-
- protected static byte[] encode( Character value ) {
- return encode( value.charValue() );
- }
-
- protected static byte[] encode( String value ) throws Exception {
- return value.getBytes( "UTF-8" );
- }
-
- protected static byte[] encode( StringBuffer value ) throws Exception {
- return encode( value.toString() );
- }
-
- protected static byte[] encode( float value ) throws Exception {
- return encode( (int)Float.floatToIntBits( value ) );
- }
-
- protected static byte[] encode( Short value ) throws Exception {
- return encode( (int)value.shortValue() );
- }
-
- protected static byte[] encode( double value ) throws Exception {
- return encode( (long)Double.doubleToLongBits( value ) );
- }
-
- protected static byte[] encode( StringBuilder value ) throws Exception {
- return encode( value.toString() );
- }
-
- protected static byte[] encode( byte[] value ) {
- return value;
- }
-
- protected static byte[] getBytes( long value ) {
- byte[] b = new byte[8];
- b[0] = (byte)((value >> 56) & 0xFF);
- b[1] = (byte)((value >> 48) & 0xFF);
- b[2] = (byte)((value >> 40) & 0xFF);
- b[3] = (byte)((value >> 32) & 0xFF);
- b[4] = (byte)((value >> 24) & 0xFF);
- b[5] = (byte)((value >> 16) & 0xFF);
- b[6] = (byte)((value >> 8) & 0xFF);
- b[7] = (byte)((value >> 0) & 0xFF);
- return b;
- }
-
- protected static byte[] getBytes( int value ) {
- byte[] b = new byte[4];
- b[0] = (byte)((value >> 24) & 0xFF);
- b[1] = (byte)((value >> 16) & 0xFF);
- b[2] = (byte)((value >> 8) & 0xFF);
- b[3] = (byte)((value >> 0) & 0xFF);
- return b;
- }
-
- /**
- * Decodes byte array using memcache flag to determine type.
- *
- * @param b byte array to decode
- * @param flag memcached flags value marking the stored type
- * @return decoded object, or null if the type is not recognized
- * @throws Exception if decoding fails
- */
- public static Object decode( byte[] b, int flag ) throws Exception {
-
- if ( b.length < 1 )
- return null;
-
-
- if ( ( flag & MemcachedClient.MARKER_BYTE ) == MemcachedClient.MARKER_BYTE )
- return decodeByte( b );
-
- if ( ( flag & MemcachedClient.MARKER_BOOLEAN ) == MemcachedClient.MARKER_BOOLEAN )
- return decodeBoolean( b );
-
- if ( ( flag & MemcachedClient.MARKER_INTEGER ) == MemcachedClient.MARKER_INTEGER )
- return decodeInteger( b );
-
- if ( ( flag & MemcachedClient.MARKER_LONG ) == MemcachedClient.MARKER_LONG )
- return decodeLong( b );
-
- if ( ( flag & MemcachedClient.MARKER_CHARACTER ) == MemcachedClient.MARKER_CHARACTER )
- return decodeCharacter( b );
-
- if ( ( flag & MemcachedClient.MARKER_STRING ) == MemcachedClient.MARKER_STRING )
- return decodeString( b );
-
- if ( ( flag & MemcachedClient.MARKER_STRINGBUFFER ) == MemcachedClient.MARKER_STRINGBUFFER )
- return decodeStringBuffer( b );
-
- if ( ( flag & MemcachedClient.MARKER_FLOAT ) == MemcachedClient.MARKER_FLOAT )
- return decodeFloat( b );
-
- if ( ( flag & MemcachedClient.MARKER_SHORT ) == MemcachedClient.MARKER_SHORT )
- return decodeShort( b );
-
- if ( ( flag & MemcachedClient.MARKER_DOUBLE ) == MemcachedClient.MARKER_DOUBLE )
- return decodeDouble( b );
-
- if ( ( flag & MemcachedClient.MARKER_DATE ) == MemcachedClient.MARKER_DATE )
- return decodeDate( b );
-
- if ( ( flag & MemcachedClient.MARKER_STRINGBUILDER ) == MemcachedClient.MARKER_STRINGBUILDER )
- return decodeStringBuilder( b );
-
- if ( ( flag & MemcachedClient.MARKER_BYTEARR ) == MemcachedClient.MARKER_BYTEARR )
- return decodeByteArr( b );
-
- return null;
- }
-
- // decode methods
- protected static Byte decodeByte( byte[] b ) {
- return new Byte( b[0] );
- }
-
- protected static Boolean decodeBoolean( byte[] b ) {
- boolean value = b[0] == 1;
- return ( value ) ? Boolean.TRUE : Boolean.FALSE;
- }
-
- protected static Integer decodeInteger( byte[] b ) {
- return new Integer( toInt( b ) );
- }
-
- protected static Long decodeLong( byte[] b ) throws Exception {
- return new Long( toLong( b ) );
- }
-
- protected static Character decodeCharacter( byte[] b ) {
- return new Character( (char)decodeInteger( b ).intValue() );
- }
-
- protected static String decodeString( byte[] b ) throws Exception {
- return new String( b, "UTF-8" );
- }
-
- protected static StringBuffer decodeStringBuffer( byte[] b ) throws Exception {
- return new StringBuffer( decodeString( b ) );
- }
-
- protected static Float decodeFloat( byte[] b ) throws Exception {
- Integer l = decodeInteger( b );
- return new Float( Float.intBitsToFloat( l.intValue() ) );
- }
-
- protected static Short decodeShort( byte[] b ) throws Exception {
- return new Short( (short)decodeInteger( b ).intValue() );
- }
-
- protected static Double decodeDouble( byte[] b ) throws Exception {
- Long l = decodeLong( b );
- return new Double( Double.longBitsToDouble( l.longValue() ) );
- }
-
- protected static Date decodeDate( byte[] b ) {
- return new Date( toLong( b ) );
- }
-
- protected static StringBuilder decodeStringBuilder( byte[] b ) throws Exception {
- return new StringBuilder( decodeString( b ) );
- }
-
- protected static byte[] decodeByteArr( byte[] b ) {
- return b;
- }
-
- /**
- * Converts four big-endian bytes into an int, mirroring the byte order
- * written by getBytes( int ).
- *
- * @param b byte array holding a big-endian int in its first four bytes
- * @return decoded int value
- */
- protected static int toInt( byte[] b ) {
- return ((b[0] & 0xFF) << 24) +
- ((b[1] & 0xFF) << 16) +
- ((b[2] & 0xFF) << 8) +
- (b[3] & 0xFF);
- }
-
- protected static long toLong( byte[] b ) {
- return ((((long) b[7]) & 0xFF) +
- ((((long) b[6]) & 0xFF) << 8) +
- ((((long) b[5]) & 0xFF) << 16) +
- ((((long) b[4]) & 0xFF) << 24) +
- ((((long) b[3]) & 0xFF) << 32) +
- ((((long) b[2]) & 0xFF) << 40) +
- ((((long) b[1]) & 0xFF) << 48) +
- ((((long) b[0]) & 0xFF) << 56));
- }
-}
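The class comment above describes the scheme this file implements: a per-type marker that the client stores in the item's flags, plus the value written as raw bytes (big-endian for the numeric types, as getBytes/toInt/toLong show). A round-trip sketch through the public helpers, with a hypothetical class name:

    import com.meetup.memcached.NativeHandler;

    public class NativeHandlerRoundTrip {
        public static void main( String[] args ) throws Exception {
            Integer original = Integer.valueOf( 42 );

            if ( NativeHandler.isHandled( original ) ) {
                // marker the client would store in the item's flags (MemcachedClient.MARKER_INTEGER here)
                int flag = NativeHandler.getMarkerFlag( original );
                byte[] raw = NativeHandler.encode( original ); // 4 bytes, big-endian

                Object restored = NativeHandler.decode( raw, flag );
                System.out.println( restored + " as " + restored.getClass().getName() ); // 42 as java.lang.Integer
            }
        }
    }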
diff --git a/src/com/meetup/memcached/NestedIOException.java b/src/com/meetup/memcached/NestedIOException.java
deleted file mode 100644
index 264605e..0000000
--- a/src/com/meetup/memcached/NestedIOException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Copyright (c) 2008 Greg Whalin
- * All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the BSD license
- *
- * This library is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.
- *
- * You should have received a copy of the BSD License along with this
- * library.
- *
- * @author Kevin A. Burton
- */
-package com.meetup.memcached;
-
-import java.io.*;
-
-/**
- * Bridge class to provide nested Exceptions with IOException which has
- * constructors that don't take Throwables.
- *
- * @author Kevin Burton
- * @version 1.2
- */
-public class NestedIOException extends IOException {
-
- /**
- * Create a new NestedIOException instance.
- * @param cause object of type throwable
- */
- public NestedIOException( Throwable cause ) {
- super( cause.getMessage() );
- super.initCause( cause );
- }
-
- public NestedIOException( String message, Throwable cause ) {
- super( message );
- initCause( cause );
- }
-}
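As the comment notes, this class exists because IOException historically lacked constructors that accept a cause (they only arrived in Java 6), so the cause is chained via initCause. A small sketch of wrapping a non-IO failure so callers that only catch IOException still see the root cause (class name hypothetical):

    import java.io.IOException;

    import com.meetup.memcached.NestedIOException;

    public class NestedIOExceptionSketch {
        public static void main( String[] args ) {
            try {
                throw new IllegalStateException( "underlying failure" );
            }
            catch ( IllegalStateException cause ) {
                IOException wrapped = new NestedIOException( "memcached call failed", cause );
                // the chained cause survives, so stack traces show both layers
                System.out.println( wrapped.getMessage() + " <- " + wrapped.getCause().getMessage() );
            }
        }
    }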
diff --git a/src/com/meetup/memcached/SockIOPool.java b/src/com/meetup/memcached/SockIOPool.java
deleted file mode 100644
index 6fe0c92..0000000
--- a/src/com/meetup/memcached/SockIOPool.java
+++ /dev/null
@@ -1,1903 +0,0 @@
-/**
- * Copyright (c) 2008 Greg Whalin
- * All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the BSD license
- *
- * This library is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.
- *
- * You should have received a copy of the BSD License along with this
- * library.
- *
- * @author greg whalin
- */
-package com.meetup.memcached;
-
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-
-// java.util
-import java.util.Map;
-import java.util.List;
-import java.util.Set;
-import java.util.Iterator;
-import java.util.ArrayList;
-import java.util.IdentityHashMap;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Date;
-import java.util.Arrays;
-import java.util.SortedMap;
-import java.util.TreeMap;
-
-import java.util.zip.*;
-import java.net.*;
-import java.io.*;
-import java.nio.*;
-import java.nio.channels.*;
-import java.util.concurrent.locks.ReentrantLock;
-import org.apache.log4j.Logger;
-
-/**
- * This class is a connection pool for maintaining a pool of persistent connections
- * to memcached servers.
- *
- * The pool must be initialized prior to use. This should typically be early on
- * in the lifecycle of the JVM instance.
- *
- * An example of initializing using defaults:
- *
- *
- * static {
- * String[] serverlist = { "cache0.server.com:12345", "cache1.server.com:12345" };
- *
- * SockIOPool pool = SockIOPool.getInstance();
- * pool.setServers(serverlist);
- * pool.initialize();
- * }
- *
- * An example of initializing using defaults and providing weights for servers:
- *
- * static {
- * String[] serverlist = { "cache0.server.com:12345", "cache1.server.com:12345" };
- * Integer[] weights = { new Integer(5), new Integer(2) };
- *
- * SockIOPool pool = SockIOPool.getInstance();
- * pool.setServers(serverlist);
- * pool.setWeights(weights);
- * pool.initialize();
- * }
- *
- * An example of initializing overriding defaults:
- *
- * static {
- * String[] serverlist = { "cache0.server.com:12345", "cache1.server.com:12345" };
- * Integer[] weights = { new Integer(5), new Integer(2) };
- * int initialConnections = 10;
- * int minSpareConnections = 5;
- * int maxSpareConnections = 50;
- * long maxIdleTime = 1000 * 60 * 30; // 30 minutes
- * long maxBusyTime = 1000 * 60 * 5; // 5 minutes
- * long maintThreadSleep = 1000 * 5; // 5 seconds
- * int socketTimeOut = 1000 * 3; // 3 seconds to block on reads
- * int socketConnectTO = 1000 * 3; // 3 seconds to block on initial connections. If 0, then will use blocking connect (default)
- * boolean failover = false; // turn off auto-failover in event of server down
- * boolean nagleAlg = false; // turn off Nagle's algorithm on all sockets in pool
- * boolean aliveCheck = false; // disable health check of socket on checkout
- *
- * SockIOPool pool = SockIOPool.getInstance();
- * pool.setServers( serverlist );
- * pool.setWeights( weights );
- * pool.setInitConn( initialConnections );
- * pool.setMinConn( minSpareConnections );
- * pool.setMaxConn( maxSpareConnections );
- * pool.setMaxIdle( maxIdleTime );
- * pool.setMaxBusyTime( maxBusyTime );
- * pool.setMaintSleep( maintThreadSleep );
- * pool.setSocketTO( socketTimeOut );
- * pool.setNagle( nagleAlg );
- * pool.setHashingAlg( SockIOPool.NEW_COMPAT_HASH );
- * pool.setAliveCheck( true );
- * pool.initialize();
- * }
- *
- * The easiest manner in which to initialize the pool is to set the servers and rely on defaults as in the first example.
- * After pool is initialized, a client will request a SockIO object by calling getSock with the cache key
- * The client must always close the SockIO object when finished, which will return the connection back to the pool.
- * An example of retrieving a SockIO object:
- *
- * SockIOPool.SockIO sock = SockIOPool.getInstance().getSock( key );
- * try {
- * sock.write( "version\r\n" );
- * sock.flush();
- * System.out.println( "Version: " + sock.readLine() );
- * }
- * catch (IOException ioe) { System.out.println( "io exception thrown" ) };
- *
- * sock.close();
- *
- *
- * @author greg whalin
- * @version 1.5
- */
-public class SockIOPool {
-
- // logger
- private static Logger log =
- Logger.getLogger( SockIOPool.class.getName() );
-
- // store instances of pools
- private static Map<String,SockIOPool> pools =
- new HashMap<String,SockIOPool>();
-
- // avoid recurring construction
- private static ThreadLocal<MessageDigest> MD5 = new ThreadLocal<MessageDigest>() {
- @Override
- protected MessageDigest initialValue() {
- try {
- return MessageDigest.getInstance( "MD5" );
- }
- catch ( NoSuchAlgorithmException e ) {
- log.error( "++++ no md5 algorithm found" );
- throw new IllegalStateException( "++++ no md5 algorythm found");
- }
- }
- };
-
- // Constants
- private static final Integer ZERO = new Integer( 0 );
- public static final int NATIVE_HASH = 0; // native String.hashCode();
- public static final int OLD_COMPAT_HASH = 1; // original compatibility hashing algorithm (works with other clients)
- public static final int NEW_COMPAT_HASH = 2; // new CRC32 based compatibility hashing algorithm (works with other clients)
- public static final int CONSISTENT_HASH = 3; // MD5 Based -- Stops thrashing when a server added or removed
-
- public static final long MAX_RETRY_DELAY = 10 * 60 * 1000; // max of 10 minute delay for fall off
-
- // Pool data
- private MaintThread maintThread;
- private boolean initialized = false;
- private int maxCreate = 1; // this will be initialized by pool when the pool is initialized
-
- // initial, min and max pool sizes
- private int poolMultiplier = 3;
- private int initConn = 10;
- private int minConn = 5;
- private int maxConn = 100;
- private long maxIdle = 1000 * 60 * 5; // max idle time for avail sockets
- private long maxBusyTime = 1000 * 30; // max busy time for checked-out sockets
- private long maintSleep = 1000 * 30; // maintenance thread sleep time
- private int socketTO = 1000 * 3; // default timeout of socket reads
- private int socketConnectTO = 1000 * 3; // default timeout of socket connections
- private boolean aliveCheck = false; // default to not check each connection for being alive
- private boolean failover = true; // default to failover in event of cache server dead
- private boolean failback = true; // only used if failover is also set ... controls putting a dead server back into rotation
- private boolean nagle = false; // enable/disable Nagle's algorithm
- private int hashingAlg = NATIVE_HASH; // default to using the native hash as it is the fastest
-
- // locks
- private final ReentrantLock hostDeadLock = new ReentrantLock();
-
- // list of all servers
- private String[] servers;
- private Integer[] weights;
- private Integer totalWeight = 0;
-
- private List<String> buckets;
- private TreeMap<Long,String> consistentBuckets;
-
- // dead server map
- private Map<String,Date> hostDead;
- private Map<String,Long> hostDeadDur;
-
- // map to hold all available sockets
- // map to hold busy sockets
- // set to hold sockets to close
- private Map<String,Map<SockIO,Long>> availPool;
- private Map<String,Map<SockIO,Long>> busyPool;
- private Map<SockIO,Integer> deadPool;
-
- // empty constructor
- protected SockIOPool() { }
-
- /**
- * Factory to create/retrieve new pools given a unique poolName.
- *
- * @param poolName unique name of the pool
- * @return instance of SockIOPool
- */
- public static synchronized SockIOPool getInstance( String poolName ) {
- if ( pools.containsKey( poolName ) )
- return pools.get( poolName );
-
- SockIOPool pool = new SockIOPool();
- pools.put( poolName, pool );
-
- return pool;
- }
-
- /**
- * Single argument version of factory used for back compat.
- * Simply creates a pool named "default".
- *
- * @return instance of SockIOPool
- */
- public static SockIOPool getInstance() {
- return getInstance( "default" );
- }
-
- /**
- * Sets the list of all cache servers.
- *
- * @param servers String array of servers [host:port]
- */
- public void setServers( String[] servers ) { this.servers = servers; }
-
- /**
- * Returns the current list of all cache servers.
- *
- * @return String array of servers [host:port]
- */
- public String[] getServers() { return this.servers; }
-
- /**
- * Sets the list of weights to apply to the server list.
- *
- * This is an int array with each element corresponding to an element
- * in the same position in the server String array.
- *
- * @param weights Integer array of weights
- */
- public void setWeights( Integer[] weights ) { this.weights = weights; }
-
- /**
- * Returns the current list of weights.
- *
- * @return int array of weights
- */
- public Integer[] getWeights() { return this.weights; }
-
- /**
- * Sets the initial number of connections per server in the available pool.
- *
- * @param initConn int number of connections
- */
- public void setInitConn( int initConn ) { this.initConn = initConn; }
-
- /**
- * Returns the current setting for the initial number of connections per server in
- * the available pool.
- *
- * @return number of connections
- */
- public int getInitConn() { return this.initConn; }
-
- /**
- * Sets the minimum number of spare connections to maintain in our available pool.
- *
- * @param minConn number of connections
- */
- public void setMinConn( int minConn ) { this.minConn = minConn; }
-
- /**
- * Returns the minimum number of spare connections in available pool.
- *
- * @return number of connections
- */
- public int getMinConn() { return this.minConn; }
-
- /**
- * Sets the maximum number of spare connections allowed in our available pool.
- *
- * @param maxConn number of connections
- */
- public void setMaxConn( int maxConn ) { this.maxConn = maxConn; }
-
- /**
- * Returns the maximum number of spare connections allowed in available pool.
- *
- * @return number of connections
- */
- public int getMaxConn() { return this.maxConn; }
-
- /**
- * Sets the max idle time for threads in the available pool.
- *
- * @param maxIdle idle time in ms
- */
- public void setMaxIdle( long maxIdle ) { this.maxIdle = maxIdle; }
-
- /**
- * Returns the current max idle setting.
- *
- * @return max idle setting in ms
- */
- public long getMaxIdle() { return this.maxIdle; }
-
- /**
- * Sets the max busy time for threads in the busy pool.
- *
- * @param maxBusyTime busy time in ms
- */
- public void setMaxBusyTime( long maxBusyTime ) { this.maxBusyTime = maxBusyTime; }
-
- /**
- * Returns the current max busy setting.
- *
- * @return max busy setting in ms
- */
- public long getMaxBusy() { return this.maxBusyTime; }
-
- /**
- * Set the sleep time between runs of the pool maintenance thread.
- * If set to 0, then the maint thread will not be started.
- *
- * @param maintSleep sleep time in ms
- */
- public void setMaintSleep( long maintSleep ) { this.maintSleep = maintSleep; }
-
- /**
- * Returns the current maint thread sleep time.
- *
- * @return sleep time in ms
- */
- public long getMaintSleep() { return this.maintSleep; }
-
- /**
- * Sets the socket timeout for reads.
- *
- * @param socketTO timeout in ms
- */
- public void setSocketTO( int socketTO ) { this.socketTO = socketTO; }
-
- /**
- * Returns the socket timeout for reads.
- *
- * @return timeout in ms
- */
- public int getSocketTO() { return this.socketTO; }
-
- /**
- * Sets the socket timeout for connect.
- *
- * @param socketConnectTO timeout in ms
- */
- public void setSocketConnectTO( int socketConnectTO ) { this.socketConnectTO = socketConnectTO; }
-
- /**
- * Returns the socket timeout for connect.
- *
- * @return timeout in ms
- */
- public int getSocketConnectTO() { return this.socketConnectTO; }
-
- /**
- * Sets the failover flag for the pool.
- *
- * If this flag is set to true, and a socket fails to connect,
- * the pool will attempt to return a socket from another server
- * if one exists. If set to false, then getting a socket
- * will return null if it fails to connect to the requested server.
- *
- * @param failover true/false
- */
- public void setFailover( boolean failover ) { this.failover = failover; }
-
- /**
- * Returns current state of failover flag.
- *
- * @return true/false
- */
- public boolean getFailover() { return this.failover; }
-
- /**
- * Sets the failback flag for the pool.
- *
- * If this is true and we have marked a host as dead,
- * will try to bring it back. If it is false, we will never
- * try to resurrect a dead host.
- *
- * @param failback true/false
- */
- public void setFailback( boolean failback ) { this.failback = failback; }
-
- /**
- * Returns current state of failback flag.
- *
- * @return true/false
- */
- public boolean getFailback() { return this.failback; }
-
- /**
- * Sets the aliveCheck flag for the pool.
- *
- * When true, this will attempt to talk to the server on
- * every connection checkout to make sure the connection is
- * still valid. This adds extra network chatter and thus is
- * defaulted off. May be useful if you want to ensure you do
- * not have any problems talking to the server on a dead connection.
- *
- * @param aliveCheck true/false
- */
- public void setAliveCheck( boolean aliveCheck ) { this.aliveCheck = aliveCheck; }
-
-
- /**
- * Returns the current status of the aliveCheck flag.
- *
- * @return true / false
- */
- public boolean getAliveCheck() { return this.aliveCheck; }
-
- /**
- * Sets the Nagle alg flag for the pool.
- *
- * If false, will turn off Nagle's algorithm on all sockets created.
- *
- * @param nagle true/false
- */
- public void setNagle( boolean nagle ) { this.nagle = nagle; }
-
- /**
- * Returns current status of nagle flag
- *
- * @return true/false
- */
- public boolean getNagle() { return this.nagle; }
-
- /**
- * Sets the hashing algorithm we will use.
- *
- * The types are as follows.
- *
- * SockIOPool.NATIVE_HASH (0) - native String.hashCode() - fast (cached) but not compatible with other clients
- * SockIOPool.OLD_COMPAT_HASH (1) - original compatibility hashing alg (works with other clients)
- * SockIOPool.NEW_COMPAT_HASH (2) - new CRC32 based compatibility hashing algorithm (fast and works with other clients)
- * SockIOPool.CONSISTENT_HASH (3) - MD5 based consistent hashing; minimizes bucket remapping when servers are added or removed
- *
- * @param alg int value representing hashing algorithm
- */
- public void setHashingAlg( int alg ) { this.hashingAlg = alg; }
-
- /**
- * Returns the hashing algorithm currently in use.
- *
- * @return int value representing the hashing algorithm
- */
- public int getHashingAlg() { return this.hashingAlg; }
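The algorithm chosen here only matters for key-to-server routing; a quick way to see its effect is to ask the pool which host a key maps to. A sketch with hypothetical host names, assuming the servers are actually reachable so getHost can check a socket out and back in:

    import com.meetup.memcached.SockIOPool;

    public class HashingAlgSketch {
        public static void main( String[] args ) {
            String[] serverlist = { "cache0.server.com:12345", "cache1.server.com:12345" };

            SockIOPool pool = SockIOPool.getInstance( "hash-demo" );
            pool.setServers( serverlist );
            // MD5 ring; keeps most keys on the same server when the server list changes
            pool.setHashingAlg( SockIOPool.CONSISTENT_HASH );
            pool.initialize();

            System.out.println( "key1 -> " + pool.getHost( "key1" ) );
            System.out.println( "key2 -> " + pool.getHost( "key2" ) );

            pool.shutDown();
        }
    }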
-
- /**
- * Internal private hashing method.
- *
- * This is the original hashing algorithm from other clients.
- * Found to be slow and have poor distribution.
- *
- * @param key String to hash
- * @return hashCode for this string using our own hashing algorithm
- */
- private static long origCompatHashingAlg( String key ) {
- long hash = 0;
- char[] cArr = key.toCharArray();
-
- for ( int i = 0; i < cArr.length; ++i ) {
- hash = (hash * 33) + cArr[i];
- }
-
- return hash;
- }
-
- /**
- * Internal private hashing method.
- *
- * This is the new hashing algorithm from other clients.
- * Found to be fast and have very good distribution.
- *
- * UPDATE: This is dog slow under java
- *
- * @param key
- * @return
- */
- private static long newCompatHashingAlg( String key ) {
- CRC32 checksum = new CRC32();
- checksum.update( key.getBytes() );
- long crc = checksum.getValue();
- return (crc >> 16) & 0x7fff;
- }
-
- /**
- * Internal private hashing method.
- *
- * MD5 based hash algorithm for use in the consistent
- * hashing approach.
- *
- * @param key
- * @return
- */
- private static long md5HashingAlg( String key ) {
- MessageDigest md5 = MD5.get();
- md5.reset();
- md5.update( key.getBytes() );
- byte[] bKey = md5.digest();
- long res = ((long)(bKey[3]&0xFF) << 24) | ((long)(bKey[2]&0xFF) << 16) | ((long)(bKey[1]&0xFF) << 8) | (long)(bKey[0]&0xFF);
- return res;
- }
-
- /**
- * Returns a bucket to check for a given key.
- *
- * @param key String key cache is stored under
- * @return int bucket
- */
- private long getHash( String key, Integer hashCode ) {
-
- if ( hashCode != null ) {
- if ( hashingAlg == CONSISTENT_HASH )
- return hashCode.longValue() & 0xffffffffL;
- else
- return hashCode.longValue();
- }
- else {
- switch ( hashingAlg ) {
- case NATIVE_HASH:
- return (long)key.hashCode();
- case OLD_COMPAT_HASH:
- return origCompatHashingAlg( key );
- case NEW_COMPAT_HASH:
- return newCompatHashingAlg( key );
- case CONSISTENT_HASH:
- return md5HashingAlg( key );
- default:
- // use the native hash as a default
- hashingAlg = NATIVE_HASH;
- return (long)key.hashCode();
- }
- }
- }
-
- private long getBucket( String key, Integer hashCode ) {
- long hc = getHash( key, hashCode );
-
- if ( this.hashingAlg == CONSISTENT_HASH ) {
- return findPointFor( hc );
- }
- else {
- long bucket = hc % buckets.size();
- if ( bucket < 0 ) bucket *= -1;
- return bucket;
- }
- }
-
- /**
- * Gets the first bucket key equal to or above the given hash; if none is found,
- * wraps around and returns the first key in the bucket map.
- * @param hv hash value to locate
- * @return bucket key the hash maps to
- */
- private Long findPointFor( Long hv ) {
- // this works in java 6, but still want to release support for java5
- //Long k = this.consistentBuckets.ceilingKey( hv );
- //return ( k == null ) ? this.consistentBuckets.firstKey() : k;
-
- SortedMap<Long,String> tmap =
- this.consistentBuckets.tailMap( hv );
-
- return ( tmap.isEmpty() ) ? this.consistentBuckets.firstKey() : tmap.firstKey();
- }
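That tailMap call is the whole consistent-hash lookup: take the first bucket point at or clockwise of the hash, and wrap to the lowest point when the tail is empty. A self-contained sketch of the same lookup against a made-up ring:

    import java.util.SortedMap;
    import java.util.TreeMap;

    public class RingLookupSketch {
        public static void main( String[] args ) {
            // hypothetical ring: hash points -> server names
            TreeMap<Long,String> ring = new TreeMap<Long,String>();
            ring.put( 100L, "cache0:11211" );
            ring.put( 2000L, "cache1:11211" );
            ring.put( 90000L, "cache2:11211" );

            System.out.println( lookup( ring, 1500L ) );  // cache1:11211 -- first point >= 1500
            System.out.println( lookup( ring, 95000L ) ); // cache0:11211 -- wraps to the first point
        }

        static String lookup( TreeMap<Long,String> ring, long hv ) {
            SortedMap<Long,String> tail = ring.tailMap( hv );
            Long point = tail.isEmpty() ? ring.firstKey() : tail.firstKey();
            return ring.get( point );
        }
    }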
-
- /**
- * Initializes the pool.
- */
- public void initialize() {
-
- synchronized( this ) {
-
- // check to see if already initialized
- if ( initialized
- && ( buckets != null || consistentBuckets != null )
- && ( availPool != null )
- && ( busyPool != null ) ) {
- log.error( "++++ trying to initialize an already initialized pool" );
- return;
- }
-
- // pools
- availPool = new HashMap<String,Map<SockIO,Long>>( servers.length * initConn );
- busyPool = new HashMap<String,Map<SockIO,Long>>( servers.length * initConn );
- deadPool = new IdentityHashMap<SockIO,Integer>();
-
- hostDeadDur = new HashMap<String,Long>();
- hostDead = new HashMap<String,Date>();
- maxCreate = (poolMultiplier > minConn) ? minConn : minConn / poolMultiplier; // only create up to maxCreate connections at once
-
- if ( log.isDebugEnabled() ) {
- log.debug( "++++ initializing pool with following settings:" );
- log.debug( "++++ initial size: " + initConn );
- log.debug( "++++ min spare : " + minConn );
- log.debug( "++++ max spare : " + maxConn );
- }
-
- // if servers is not set, or it empty, then
- // throw a runtime exception
- if ( servers == null || servers.length <= 0 ) {
- log.error( "++++ trying to initialize with no servers" );
- throw new IllegalStateException( "++++ trying to initialize with no servers" );
- }
-
- // initalize our internal hashing structures
- if ( this.hashingAlg == CONSISTENT_HASH )
- populateConsistentBuckets();
- else
- populateBuckets();
-
- // mark pool as initialized
- this.initialized = true;
-
- // start maint thread
- if ( this.maintSleep > 0 )
- this.startMaintThread();
- }
- }
-
- private void populateBuckets() {
- if ( log.isDebugEnabled() )
- log.debug( "++++ initializing internal hashing structure for consistent hashing" );
-
- // store buckets in tree map
- this.buckets = new ArrayList();
-
- for ( int i = 0; i < servers.length; i++ ) {
- if ( this.weights != null && this.weights.length > i ) {
- for ( int k = 0; k < this.weights[i].intValue(); k++ ) {
- this.buckets.add( servers[i] );
- if ( log.isDebugEnabled() )
- log.debug( "++++ added " + servers[i] + " to server bucket" );
- }
- }
- else {
- this.buckets.add( servers[i] );
- if ( log.isDebugEnabled() )
- log.debug( "++++ added " + servers[i] + " to server bucket" );
- }
-
- // create initial connections
- if ( log.isDebugEnabled() )
- log.debug( "+++ creating initial connections (" + initConn + ") for host: " + servers[i] );
-
- for ( int j = 0; j < initConn; j++ ) {
- SockIO socket = createSocket( servers[i] );
- if ( socket == null ) {
- log.error( "++++ failed to create connection to: " + servers[i] + " -- only " + j + " created." );
- break;
- }
-
- addSocketToPool( availPool, servers[i], socket );
- if ( log.isDebugEnabled() )
- log.debug( "++++ created and added socket: " + socket.toString() + " for host " + servers[i] );
- }
- }
- }
-
- private void populateConsistentBuckets() {
- if ( log.isDebugEnabled() )
- log.debug( "++++ initializing internal hashing structure for consistent hashing" );
-
- // store buckets in tree map
- this.consistentBuckets = new TreeMap<Long,String>();
-
- MessageDigest md5 = MD5.get();
- if ( this.totalWeight <= 0 && this.weights != null ) {
- for ( int i = 0; i < this.weights.length; i++ )
- this.totalWeight += ( this.weights[i] == null ) ? 1 : this.weights[i];
- }
- else if ( this.weights == null ) {
- this.totalWeight = this.servers.length;
- }
-
- for ( int i = 0; i < servers.length; i++ ) {
- int thisWeight = 1;
- if ( this.weights != null && this.weights[i] != null )
- thisWeight = this.weights[i];
-
- double factor = Math.floor( ((double)(40 * this.servers.length * thisWeight)) / (double)this.totalWeight );
-
- for ( long j = 0; j < factor; j++ ) {
- byte[] d = md5.digest( ( servers[i] + "-" + j ).getBytes() );
- for ( int h = 0 ; h < 4; h++ ) {
- Long k =
- ((long)(d[3+h*4]&0xFF) << 24)
- | ((long)(d[2+h*4]&0xFF) << 16)
- | ((long)(d[1+h*4]&0xFF) << 8)
- | ((long)(d[0+h*4]&0xFF));
-
- consistentBuckets.put( k, servers[i] );
- if ( log.isDebugEnabled() )
- log.debug( "++++ added " + servers[i] + " to server bucket" );
- }
- }
-
- // create initial connections
- if ( log.isDebugEnabled() )
- log.debug( "+++ creating initial connections (" + initConn + ") for host: " + servers[i] );
-
- for ( int j = 0; j < initConn; j++ ) {
- SockIO socket = createSocket( servers[i] );
- if ( socket == null ) {
- log.error( "++++ failed to create connection to: " + servers[i] + " -- only " + j + " created." );
- break;
- }
-
- addSocketToPool( availPool, servers[i], socket );
- if ( log.isDebugEnabled() )
- log.debug( "++++ created and added socket: " + socket.toString() + " for host " + servers[i] );
- }
- }
- }
-
- /**
- * Returns state of pool.
- *
- * @return true if initialized.
- */
- public boolean isInitialized() {
- return initialized;
- }
-
- /**
- * Creates a new SockIO obj for the given server.
- *
- * If server fails to connect, then return null and do not try
- * again until a duration has passed. This duration will grow
- * by doubling after each failed attempt to connect.
- *
- * @param host host:port to connect to
- * @return SockIO obj or null if failed to create
- */
- protected SockIO createSocket( String host ) {
-
- SockIO socket = null;
-
- // if host is dead, then we don't need to try again
- // until the dead status has expired
- // we do not try to put back in if failback is off
- hostDeadLock.lock();
- try {
- if ( failover && failback && hostDead.containsKey( host ) && hostDeadDur.containsKey( host ) ) {
-
- Date store = hostDead.get( host );
- long expire = hostDeadDur.get( host ).longValue();
-
- if ( (store.getTime() + expire) > System.currentTimeMillis() )
- return null;
- }
- }
- finally {
- hostDeadLock.unlock();
- }
-
- try {
- socket = new SockIO( this, host, this.socketTO, this.socketConnectTO, this.nagle );
-
- if ( !socket.isConnected() ) {
- log.error( "++++ failed to get SockIO obj for: " + host + " -- new socket is not connected" );
- deadPool.put( socket, ZERO );
- socket = null;
- }
- }
- catch ( Exception ex ) {
- log.error( "++++ failed to get SockIO obj for: " + host );
- log.error( ex.getMessage(), ex );
- socket = null;
- }
-
- // if we failed to get socket, then mark
- // host dead for a duration which falls off
- hostDeadLock.lock();
- try {
- if ( socket == null ) {
- Date now = new Date();
- hostDead.put( host, now );
-
- long expire = ( hostDeadDur.containsKey( host ) ) ? (((Long)hostDeadDur.get( host )).longValue() * 2) : 1000;
-
- if ( expire > MAX_RETRY_DELAY )
- expire = MAX_RETRY_DELAY;
-
- hostDeadDur.put( host, new Long( expire ) );
- if ( log.isDebugEnabled() )
- log.debug( "++++ ignoring dead host: " + host + " for " + expire + " ms" );
-
- // also clear all entries for this host from availPool
- clearHostFromPool( availPool, host );
- }
- else {
- if ( log.isDebugEnabled() )
- log.debug( "++++ created socket (" + socket.toString() + ") for host: " + host );
- if ( hostDead.containsKey( host ) || hostDeadDur.containsKey( host ) ) {
- hostDead.remove( host );
- hostDeadDur.remove( host );
- }
- }
- }
- finally {
- hostDeadLock.unlock();
- }
-
- return socket;
- }
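The dead-host bookkeeping above starts the retry delay at one second, doubles it after each further failed connect, and caps it at MAX_RETRY_DELAY (ten minutes). A standalone sketch of that fall-off schedule, separate from the pool itself:

    public class DeadHostBackoffSketch {
        public static void main( String[] args ) {
            long maxRetryDelay = 10 * 60 * 1000; // mirrors SockIOPool.MAX_RETRY_DELAY
            long expire = 1000;                  // first failure: skip the host for 1 second

            for ( int failure = 1; failure <= 12; failure++ ) {
                System.out.println( "failure " + failure + ": ignore host for " + expire + " ms" );
                expire = Math.min( expire * 2, maxRetryDelay );
            }
        }
    }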
-
- /**
- * Gets the host that a particular key resides on.
- *
- * @param key cache key
- * @return host ( host:port ) the key maps to
- */
- public String getHost( String key ) {
- return getHost( key, null );
- }
-
- /**
- * Gets the host that a particular key / hashcode resides on.
- *
- * @param key cache key
- * @param hashcode optional hashcode to use in place of hashing the key
- * @return host ( host:port ) the key resides on
- */
- public String getHost( String key, Integer hashcode ) {
- SockIO socket = getSock( key, hashcode );
- String host = socket.getHost();
- socket.close();
- return host;
- }
-
- /**
- * Returns appropriate SockIO object given
- * string cache key.
- *
- * @param key cache key
- * @return SockIO obj connected to server
- */
- public SockIO getSock( String key ) {
- return getSock( key, null );
- }
-
- /**
- * Returns appropriate SockIO object given
- * string cache key and optional hashcode.
- *
- * Tries to get SockIO from pool. Fails over
- * to additional pools in event of server failure.
- *
- * @param key cache key
- * @param hashCode if not null, then the int hashcode to use
- * @return SockIO obj connected to server
- */
- public SockIO getSock( String key, Integer hashCode ) {
-
- if ( log.isDebugEnabled() )
- log.debug( "cache socket pick " + key + " " + hashCode );
-
- if ( !this.initialized ) {
- log.error( "attempting to get SockIO from uninitialized pool!" );
- return null;
- }
-
- // if no servers return null
- if ( ( this.hashingAlg == CONSISTENT_HASH && consistentBuckets.size() == 0 )
- || ( buckets != null && buckets.size() == 0 ) )
- return null;
-
- // if only one server, return it
- if ( ( this.hashingAlg == CONSISTENT_HASH && consistentBuckets.size() == 1 )
- || ( buckets != null && buckets.size() == 1 ) ) {
-
- SockIO sock = ( this.hashingAlg == CONSISTENT_HASH )
- ? getConnection( consistentBuckets.get( consistentBuckets.firstKey() ) )
- : getConnection( buckets.get( 0 ) );
-
- if ( sock != null && sock.isConnected() ) {
- if ( aliveCheck ) {
- if ( !sock.isAlive() ) {
- sock.close();
- try { sock.trueClose(); } catch ( IOException ioe ) { log.error( "failed to close dead socket" ); }
- sock = null;
- }
- }
- }
- else {
- if ( sock != null ) {
- deadPool.put( sock, ZERO );
- sock = null;
- }
- }
-
- return sock;
- }
-
- // from here on, we are working w/ multiple servers
- // keep trying different servers until we find one
- // making sure we only try each server one time
- Set<String> tryServers = new HashSet<String>( Arrays.asList( servers ) );
-
- // get initial bucket
- long bucket = getBucket( key, hashCode );
- String server = ( this.hashingAlg == CONSISTENT_HASH )
- ? consistentBuckets.get( bucket )
- : buckets.get( (int)bucket );
-
- while ( !tryServers.isEmpty() ) {
-
- // try to get socket from bucket
- SockIO sock = getConnection( server );
-
- if ( log.isDebugEnabled() )
- log.debug( "cache choose " + server + " for " + key );
-
- if ( sock != null && sock.isConnected() ) {
- if ( aliveCheck ) {
- if ( sock.isAlive() ) {
- return sock;
- }
- else {
- sock.close();
- try { sock.trueClose(); } catch ( IOException ioe ) { log.error( "failed to close dead socket" ); }
- sock = null;
- }
- }
- else {
- return sock;
- }
- }
- else {
- if ( sock != null ) {
- deadPool.put( sock, ZERO );
- sock = null;
- }
- }
-
- // if we do not want to failover, then bail here
- if ( !failover )
- return null;
-
- // log that we tried
- tryServers.remove( server );
-
- if ( tryServers.isEmpty() )
- break;
-
- // if we failed to get a socket from this server
- // then we try again by adding an incrementer to the
- // current key and then rehashing
- int rehashTries = 0;
- while ( !tryServers.contains( server ) ) {
-
- String newKey = String.format( "%s%s", rehashTries, key );
- if ( log.isDebugEnabled() )
- log.debug( "rehashing with: " + newKey );
-
- bucket = getBucket( newKey, null );
- server = ( this.hashingAlg == CONSISTENT_HASH )
- ? consistentBuckets.get( bucket )
- : buckets.get( (int)bucket );
-
- rehashTries++;
- }
- }
-
- return null;
- }
-
- /**
- * Returns a SockIO object from the pool for the passed in host.
- *
- * Meant to be called from a more intelligent method
- * which handles choosing appropriate server
- * and failover.
- *
- * @param host host from which to retrieve object
- * @return SockIO object or null if fail to retrieve one
- */
- public SockIO getConnection( String host ) {
-
- if ( !this.initialized ) {
- log.error( "attempting to get SockIO from uninitialized pool!" );
- return null;
- }
-
- if ( host == null )
- return null;
-
- synchronized( this ) {
-
- // if we have items in the pool
- // then we can return it
- if ( availPool != null && !availPool.isEmpty() ) {
-
- // take first connected socket
- Map<SockIO,Long> aSockets = availPool.get( host );
-
- if ( aSockets != null && !aSockets.isEmpty() ) {
-
- for ( Iterator<SockIO> i = aSockets.keySet().iterator(); i.hasNext(); ) {
- SockIO socket = i.next();
-
- if ( socket.isConnected() ) {
- if ( log.isDebugEnabled() )
- log.debug( "++++ moving socket for host (" + host + ") to busy pool ... socket: " + socket );
-
- // remove from avail pool
- i.remove();
-
- // add to busy pool
- addSocketToPool( busyPool, host, socket );
-
- // return socket
- return socket;
- }
- else {
- // add to deadpool for later reaping
- deadPool.put( socket, ZERO );
-
- // remove from avail pool
- i.remove();
- }
- }
- }
- }
- }
-
- // create one socket -- let the maint thread take care of creating more
- SockIO socket = createSocket( host );
- if ( socket != null ) {
- synchronized( this ) {
- addSocketToPool( busyPool, host, socket );
- }
- }
-
- return socket;
- }
-
- /**
- * Adds a socket to a given pool for the given host.
- * THIS METHOD IS NOT THREADSAFE, SO BE CAREFUL WHEN USING!
- *
- * Internal utility method.
- *
- * @param pool pool to add to
- * @param host host this socket is connected to
- * @param socket socket to add
- */
- protected void addSocketToPool( Map<String,Map<SockIO,Long>> pool, String host, SockIO socket ) {
-
- if ( pool.containsKey( host ) ) {
- Map<SockIO,Long> sockets = pool.get( host );
-
- if ( sockets != null ) {
- sockets.put( socket, new Long( System.currentTimeMillis() ) );
- return;
- }
- }
-
- Map<SockIO,Long> sockets =
- new IdentityHashMap<SockIO,Long>();
-
- sockets.put( socket, new Long( System.currentTimeMillis() ) );
- pool.put( host, sockets );
- }
-
- /**
- * Removes a socket from specified pool for host.
- * THIS METHOD IS NOT THREADSAFE, SO BE CAREFUL WHEN USING!
- *
- * Internal utility method.
- *
- * @param pool pool to remove from
- * @param host host pool
- * @param socket socket to remove
- */
- protected void removeSocketFromPool( Map<String,Map<SockIO,Long>> pool, String host, SockIO socket ) {
- if ( pool.containsKey( host ) ) {
- Map<SockIO,Long> sockets = pool.get( host );
- if ( sockets != null )
- sockets.remove( socket );
- }
- }
-
- /**
- * Closes and removes all sockets from specified pool for host.
- * THIS METHOD IS NOT THREADSAFE, SO BE CAREFUL WHEN USING!
- *
- * Internal utility method.
- *
- * @param pool pool to clear
- * @param host host to clear
- */
- protected void clearHostFromPool( Map<String,Map<SockIO,Long>> pool, String host ) {
-
- if ( pool.containsKey( host ) ) {
- Map<SockIO,Long> sockets = pool.get( host );
-
- if ( sockets != null && sockets.size() > 0 ) {
- for ( Iterator<SockIO> i = sockets.keySet().iterator(); i.hasNext(); ) {
- SockIO socket = i.next();
- try {
- socket.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to close socket: " + ioe.getMessage() );
- }
-
- i.remove();
- socket = null;
- }
- }
- }
- }
-
- /**
- * Checks a SockIO object in with the pool.
- *
- * This will remove SocketIO from busy pool, and optionally
- * add to avail pool.
- *
- * @param socket socket to return
- * @param addToAvail add to avail pool if true
- */
- private void checkIn( SockIO socket, boolean addToAvail ) {
-
- String host = socket.getHost();
- if ( log.isDebugEnabled() )
- log.debug( "++++ calling check-in on socket: " + socket.toString() + " for host: " + host );
-
- synchronized( this ) {
- // remove from the busy pool
- if ( log.isDebugEnabled() )
- log.debug( "++++ removing socket (" + socket.toString() + ") from busy pool for host: " + host );
- removeSocketFromPool( busyPool, host, socket );
-
- if ( socket.isConnected() && addToAvail ) {
- // add to avail pool
- if ( log.isDebugEnabled() )
- log.debug( "++++ returning socket (" + socket.toString() + " to avail pool for host: " + host );
- addSocketToPool( availPool, host, socket );
- }
- else {
- deadPool.put( socket, ZERO );
- socket = null;
- }
- }
- }
-
- /**
- * Returns a socket to the avail pool.
- *
- * This is called from SockIO.close(). Calling this method
- * directly without closing the SockIO object first
- * will cause an IOException to be thrown.
- *
- * @param socket socket to return
- */
- private void checkIn( SockIO socket ) {
- checkIn( socket, true );
- }
-
- /**
- * Closes all sockets in the passed in pool.
- *
- * Internal utility method.
- *
- * @param pool pool to close
- */
- protected void closePool( Map<String,Map<SockIO,Long>> pool ) {
- for ( Iterator<String> i = pool.keySet().iterator(); i.hasNext(); ) {
- String host = i.next();
- Map<SockIO,Long> sockets = pool.get( host );
-
- for ( Iterator<SockIO> j = sockets.keySet().iterator(); j.hasNext(); ) {
- SockIO socket = j.next();
-
- try {
- socket.trueClose();
- }
- catch ( IOException ioe ) {
- log.error( "++++ failed to trueClose socket: " + socket.toString() + " for host: " + host );
- }
-
- j.remove();
- socket = null;
- }
- }
- }
-
- /**
- * Shuts down the pool.
- *
- * Cleanly closes all sockets.
- * Stops the maint thread.
- * Nulls out all internal maps
- */
- public void shutDown() {
- synchronized( this ) {
- if ( log.isDebugEnabled() )
- log.debug( "++++ SockIOPool shutting down..." );
-
- if ( maintThread != null && maintThread.isRunning() ) {
- // stop the maint thread
- stopMaintThread();
-
- // wait for the thread to finish
- while ( maintThread.isRunning() ) {
- if ( log.isDebugEnabled() )
- log.debug( "++++ waiting for main thread to finish run +++" );
- try { Thread.sleep( 500 ); } catch ( Exception ex ) { }
- }
- }
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ closing all internal pools." );
- closePool( availPool );
- closePool( busyPool );
- availPool = null;
- busyPool = null;
- buckets = null;
- consistentBuckets = null;
- hostDeadDur = null;
- hostDead = null;
- maintThread = null;
- initialized = false;
- if ( log.isDebugEnabled() )
- log.debug( "++++ SockIOPool finished shutting down." );
- }
- }
-
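- /*
-  * Typical pool lifecycle, sketched from the methods in this class (server address is
-  * illustrative):
-  *
-  *   SockIOPool pool = SockIOPool.getInstance();
-  *   pool.setServers( new String[] { "127.0.0.1:11211" } );
-  *   pool.initialize();   // spawns the maint thread when maintSleep > 0
-  *   ...                  // use MemcachedClient against this pool as usual
-  *   pool.shutDown();     // stops the maint thread, closes sockets, clears internal state
-  */
-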
- /**
- * Starts the maintenance thread.
- *
- * This thread will manage the size of the active pool
- * as well as move any closed, but not checked in sockets
- * back to the available pool.
- */
- protected void startMaintThread() {
-
- if ( maintThread != null ) {
-
- if ( maintThread.isRunning() ) {
- log.error( "main thread already running" );
- }
- else {
- maintThread.start();
- }
- }
- else {
- maintThread = new MaintThread( this );
- maintThread.setInterval( this.maintSleep );
- maintThread.start();
- }
- }
-
- /**
- * Stops the maintenance thread.
- */
- protected void stopMaintThread() {
- if ( maintThread != null && maintThread.isRunning() )
- maintThread.stopThread();
- }
-
- /**
- * Runs self maintenance on all internal pools.
- *
- * This is typically called by the maintenance thread to manage pool size.
- */
- protected void selfMaint() {
- if ( log.isDebugEnabled() )
- log.debug( "++++ Starting self maintenance...." );
-
- // go through avail sockets and create sockets
- // as needed to maintain pool settings
- Map<String,Integer> needSockets =
- new HashMap<String,Integer>();
-
- synchronized( this ) {
- // find out how many to create
- for ( Iterator<String> i = availPool.keySet().iterator(); i.hasNext(); ) {
- String host = i.next();
- Map<SockIO,Long> sockets = availPool.get( host );
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ Size of avail pool for host (" + host + ") = " + sockets.size() );
-
- // if pool is too small (n < minConn)
- if ( sockets.size() < minConn ) {
- // need to create new sockets
- int need = minConn - sockets.size();
- needSockets.put( host, need );
- }
- }
- }
-
- // now create
- Map<String,Set<SockIO>> newSockets =
- new HashMap<String,Set<SockIO>>();
-
- for ( String host : needSockets.keySet() ) {
- Integer need = needSockets.get( host );
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ Need to create " + need + " new sockets for pool for host: " + host );
-
- Set<SockIO> newSock = new HashSet<SockIO>( need );
- for ( int j = 0; j < need; j++ ) {
- SockIO socket = createSocket( host );
-
- if ( socket == null )
- break;
-
- newSock.add( socket );
- }
-
- newSockets.put( host, newSock );
- }
-
- // synchronize to add and remove to/from the avail pool
- // as well as clean up the busy pool (no point in releasing
- // the lock here, as the pool adjustment is quick and there are
- // no blocking ops here)
- synchronized( this ) {
- for ( String host : newSockets.keySet() ) {
- Set<SockIO> sockets = newSockets.get( host );
- for ( SockIO socket : sockets )
- addSocketToPool( availPool, host, socket );
- }
-
- for ( Iterator<String> i = availPool.keySet().iterator(); i.hasNext(); ) {
- String host = i.next();
- Map<SockIO,Long> sockets = availPool.get( host );
- if ( log.isDebugEnabled() )
- log.debug( "++++ Size of avail pool for host (" + host + ") = " + sockets.size() );
-
- if ( sockets.size() > maxConn ) {
- // need to close down some sockets
- int diff = sockets.size() - maxConn;
- int needToClose = (diff <= poolMultiplier)
- ? diff
- : (diff) / poolMultiplier;
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ need to remove " + needToClose + " spare sockets for pool for host: " + host );
-
- for ( Iterator<SockIO> j = sockets.keySet().iterator(); j.hasNext(); ) {
- if ( needToClose <= 0 )
- break;
-
- // remove stale entries
- SockIO socket = j.next();
- long expire = sockets.get( socket ).longValue();
-
- // if past idle time
- // then close socket
- // and remove from pool
- if ( (expire + maxIdle) < System.currentTimeMillis() ) {
- if ( log.isDebugEnabled() )
- log.debug( "+++ removing stale entry from pool as it is past its idle timeout and pool is over max spare" );
-
- // remove from the availPool
- deadPool.put( socket, ZERO );
- j.remove();
- needToClose--;
- }
- }
- }
- }
-
- // go through busy sockets and destroy sockets
- // as needed to maintain pool settings
- for ( Iterator<String> i = busyPool.keySet().iterator(); i.hasNext(); ) {
-
- String host = i.next();
- Map<SockIO,Long> sockets = busyPool.get( host );
-
- if ( log.isDebugEnabled() )
- log.debug( "++++ Size of busy pool for host (" + host + ") = " + sockets.size() );
-
- // loop through all connections and check to see if we have any hung connections
- for ( Iterator<SockIO> j = sockets.keySet().iterator(); j.hasNext(); ) {
- // remove stale entries
- SockIO socket = j.next();
- long hungTime = sockets.get( socket ).longValue();
-
- // if past max busy time
- // then close socket
- // and remove from pool
- if ( (hungTime + maxBusyTime) < System.currentTimeMillis() ) {
- log.error( "+++ removing potentially hung connection from busy pool ... socket in pool for " + (System.currentTimeMillis() - hungTime) + "ms" );
-
- // remove from the busy pool
- deadPool.put( socket, ZERO );
- j.remove();
- }
- }
- }
- }
-
- // finally clean out the deadPool
- Set<SockIO> toClose;
- synchronized( deadPool ) {
- toClose = deadPool.keySet();
- deadPool = new IdentityHashMap<SockIO,Integer>();
- }
-
- for ( SockIO socket : toClose ) {
- try {
- socket.trueClose( false );
- }
- catch ( Exception ex ) {
- log.error( "++++ failed to close SockIO obj from deadPool" );
- log.error( ex.getMessage(), ex );
- }
-
- socket = null;
- }
-
- if ( log.isDebugEnabled() )
- log.debug( "+++ ending self maintenance." );
- }
-
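- /*
-  * A worked example of the sizing rules above, with illustrative settings minConn=5,
-  * maxConn=250 and poolMultiplier=3 for a given host:
-  *
-  *   avail size 2   -> need = 5 - 2 = 3 new sockets are created for that host
-  *   avail size 256 -> diff = 256 - 250 = 6; since 6 > 3, needToClose = 6 / 3 = 2,
-  *                     so at most 2 sockets that are past maxIdle are moved to deadPool
-  */
-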
- /**
- * Class which extends thread and handles maintenance of the pool.
- *
- * @author greg whalin
- * @version 1.5
- */
- protected static class MaintThread extends Thread {
-
- // logger
- private static Logger log =
- Logger.getLogger( MaintThread.class.getName() );
-
- private SockIOPool pool;
- private long interval = 1000 * 3; // every 3 seconds
- private boolean stopThread = false;
- private boolean running;
-
- protected MaintThread( SockIOPool pool ) {
- this.pool = pool;
- this.setDaemon( true );
- this.setName( "MaintThread" );
- }
-
- public void setInterval( long interval ) { this.interval = interval; }
-
- public boolean isRunning() {
- return this.running;
- }
-
- /**
- * sets stop variable
- * and interrupts any wait
- */
- public void stopThread() {
- this.stopThread = true;
- this.interrupt();
- }
-
- /**
- * Runs the maintenance loop until stopped.
- */
- public void run() {
- this.running = true;
-
- while ( !this.stopThread ) {
- try {
- Thread.sleep( interval );
-
- // if pool is initialized, then
- // run the maintenance method on itself
- if ( pool.isInitialized() )
- pool.selfMaint();
-
- }
- catch ( Exception e ) {
- break;
- }
- }
-
- this.running = false;
- }
- }
-
- /**
- * MemCached client for Java, utility class for Socket IO.
- *
- * This class is a wrapper around a Socket and its streams.
- *
- * @author greg whalin
- * @author Richard 'toast' Russo
- * @version 1.5
- */
- public static class SockIO implements LineInputStream {
-
- // logger
- private static Logger log =
- Logger.getLogger( SockIO.class.getName() );
-
- // pool
- private SockIOPool pool;
-
- // data
- private String host;
- private Socket sock;
-
- private DataInputStream in;
- private BufferedOutputStream out;
-
- /**
- * creates a new SockIO object wrapping a socket
- * connection to host:port, and its input and output streams
- *
- * @param pool Pool this object is tied to
- * @param host host to connect to
- * @param port port to connect to
- * @param timeout int ms to block on data for read
- * @param connectTimeout timeout (in ms) for initial connection
- * @param noDelay whether to enable TCP_NODELAY
- * @throws IOException if an io error occurs when creating socket
- * @throws UnknownHostException if hostname is invalid
- */
- public SockIO( SockIOPool pool, String host, int port, int timeout, int connectTimeout, boolean noDelay ) throws IOException, UnknownHostException {
-
- this.pool = pool;
-
- // get a socket channel
- sock = getSocket( host, port, connectTimeout );
-
- if ( timeout >= 0 )
- sock.setSoTimeout( timeout );
-
- // testing only
- sock.setTcpNoDelay( noDelay );
-
- // wrap streams
- in = new DataInputStream( new BufferedInputStream( sock.getInputStream() ) );
- out = new BufferedOutputStream( sock.getOutputStream() );
-
- this.host = host + ":" + port;
- }
-
- /**
- * creates a new SockIO object wrapping a socket
- * connection to host:port, and its input and output streams
- *
- * @param host hostname:port
- * @param timeout read timeout value for connected socket
- * @param connectTimeout timeout for initial connections
- * @param noDelay whether to enable TCP_NODELAY
- * @throws IOException if an io error occurs when creating socket
- * @throws UnknownHostException if hostname is invalid
- */
- public SockIO( SockIOPool pool, String host, int timeout, int connectTimeout, boolean noDelay ) throws IOException, UnknownHostException {
-
- this.pool = pool;
-
- String[] ip = host.split(":");
-
- // get socket: connects with the supplied connect timeout
- sock = getSocket( ip[ 0 ], Integer.parseInt( ip[ 1 ] ), connectTimeout );
-
- if ( timeout >= 0 )
- this.sock.setSoTimeout( timeout );
-
- // testing only
- sock.setTcpNoDelay( noDelay );
-
- // wrap streams
- in = new DataInputStream( new BufferedInputStream( sock.getInputStream() ) );
- out = new BufferedOutputStream( sock.getOutputStream() );
-
- this.host = host;
- }
-
- /**
- * Method which gets a connection from SocketChannel.
- *
- * @param host host to establish connection to
- * @param port port on that host
- * @param timeout connection timeout in ms
- *
- * @return connected socket
- * @throws IOException if errors connecting or if connection times out
- */
- protected static Socket getSocket( String host, int port, int timeout ) throws IOException {
- SocketChannel sock = SocketChannel.open();
- sock.socket().connect( new InetSocketAddress( host, port ), timeout );
- return sock.socket();
- }
-
- /**
- * Lets caller get access to underlying channel.
- *
- * @return the backing SocketChannel
- */
- public SocketChannel getChannel() { return sock.getChannel(); }
-
- /**
- * returns the host this socket is connected to
- *
- * @return String representation of host (hostname:port)
- */
- public String getHost() { return this.host; }
-
- /**
- * closes socket and all streams connected to it
- *
- * @throws IOException if fails to close streams or socket
- */
- public void trueClose() throws IOException {
- trueClose( true );
- }
-
- /**
- * closes socket and all streams connected to it
- *
- * @throws IOException if fails to close streams or socket
- */
- public void trueClose( boolean addToDeadPool ) throws IOException {
- if ( log.isDebugEnabled() )
- log.debug( "++++ Closing socket for real: " + toString() );
-
- boolean err = false;
- StringBuilder errMsg = new StringBuilder();
-
- if ( in != null ) {
- try {
- in.close();
- }
- catch( IOException ioe ) {
- log.error( "++++ error closing input stream for socket: " + toString() + " for host: " + getHost() );
- log.error( ioe.getMessage(), ioe );
- errMsg.append( "++++ error closing input stream for socket: " + toString() + " for host: " + getHost() + "\n" );
- errMsg.append( ioe.getMessage() );
- err = true;
- }
- }
-
- if ( out != null ) {
- try {
- out.close();
- }
- catch ( IOException ioe ) {
- log.error( "++++ error closing output stream for socket: " + toString() + " for host: " + getHost() );
- log.error( ioe.getMessage(), ioe );
- errMsg.append( "++++ error closing output stream for socket: " + toString() + " for host: " + getHost() + "\n" );
- errMsg.append( ioe.getMessage() );
- err = true;
- }
- }
-
- if ( sock != null ) {
- try {
- sock.close();
- }
- catch ( IOException ioe ) {
- log.error( "++++ error closing socket: " + toString() + " for host: " + getHost() );
- log.error( ioe.getMessage(), ioe );
- errMsg.append( "++++ error closing socket: " + toString() + " for host: " + getHost() + "\n" );
- errMsg.append( ioe.getMessage() );
- err = true;
- }
- }
-
- // check in to pool
- if ( addToDeadPool && sock != null )
- pool.checkIn( this, false );
-
- in = null;
- out = null;
- sock = null;
-
- if ( err )
- throw new IOException( errMsg.toString() );
- }
-
- /**
- * sets closed flag and checks in to connection pool
- * but does not close connections
- */
- void close() {
- // check in to pool
- if ( log.isDebugEnabled() )
- log.debug("++++ marking socket (" + this.toString() + ") as closed and available to return to avail pool");
- pool.checkIn( this );
- }
-
- /**
- * checks if the connection is open
- *
- * @return true if connected
- */
- boolean isConnected() {
- return ( sock != null && sock.isConnected() );
- }
-
- /*
- * checks to see that the connection is still working
- *
- * @return true if still alive
- */
- boolean isAlive() {
-
- if ( !isConnected() )
- return false;
-
- // try to talk to the server w/ a dumb query to ask its version
- try {
- this.write( "version\r\n".getBytes() );
- this.flush();
- String response = this.readLine();
- }
- catch ( IOException ex ) {
- return false;
- }
-
- return true;
- }
-
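- /*
-  * The liveness probe above is just the memcached "version" command; a healthy server
-  * replies with a single line, e.g. (version string illustrative):
-  *
-  *   client: version\r\n
-  *   server: VERSION 1.4.5\r\n
-  */
-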
- /**
- * reads a line
- * intentionally not using the deprecated readLine method from DataInputStream
- *
- * @return String that was read in
- * @throws IOException if io problems during read
- */
- public String readLine() throws IOException {
- if ( sock == null || !sock.isConnected() ) {
- log.error( "++++ attempting to read from closed socket" );
- throw new IOException( "++++ attempting to read from closed socket" );
- }
-
- byte[] b = new byte[1];
- ByteArrayOutputStream bos = new ByteArrayOutputStream();
- boolean eol = false;
-
- while ( in.read( b, 0, 1 ) != -1 ) {
-
- if ( b[0] == 13 ) {
- eol = true;
- }
- else {
- if ( eol ) {
- if ( b[0] == 10 )
- break;
-
- eol = false;
- }
- }
-
- // cast byte into char array
- bos.write( b, 0, 1 );
- }
-
- if ( bos == null || bos.size() <= 0 ) {
- throw new IOException( "++++ Stream appears to be dead, so closing it down" );
- }
-
- // else return the string
- return bos.toString().trim();
- }
-
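- /*
-  * Example of what readLine() returns for a typical server response: the terminating
-  * \r\n is consumed, and the carriage return is trimmed from the result.
-  *
-  *   stream bytes: "STORED\r\n"  ->  readLine() == "STORED"
-  */
-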
- /**
- * reads up to end of line and returns nothing
- *
- * @throws IOException if io problems during read
- */
- public void clearEOL() throws IOException {
- if ( sock == null || !sock.isConnected() ) {
- log.error( "++++ attempting to read from closed socket" );
- throw new IOException( "++++ attempting to read from closed socket" );
- }
-
- byte[] b = new byte[1];
- boolean eol = false;
- while ( in.read( b, 0, 1 ) != -1 ) {
-
- // only stop when we see
- // \r (13) followed by \n (10)
- if ( b[0] == 13 ) {
- eol = true;
- continue;
- }
-
- if ( eol ) {
- if ( b[0] == 10 )
- break;
-
- eol = false;
- }
- }
- }
-
- /**
- * reads b.length bytes from the stream into the passed in byte array
- *
- * @param b byte array
- * @throws IOException if io problems during read
- */
- public int read( byte[] b ) throws IOException {
- if ( sock == null || !sock.isConnected() ) {
- log.error( "++++ attempting to read from closed socket" );
- throw new IOException( "++++ attempting to read from closed socket" );
- }
-
- int count = 0;
- while ( count < b.length ) {
- int cnt = in.read( b, count, (b.length - count) );
- if ( cnt == -1 )
- throw new IOException( "++++ stream ended before " + b.length + " bytes could be read" );
- count += cnt;
- }
-
- return count;
- }
-
- /**
- * flushes output stream
- *
- * @throws IOException if io problems during read
- */
- void flush() throws IOException {
- if ( sock == null || !sock.isConnected() ) {
- log.error( "++++ attempting to write to closed socket" );
- throw new IOException( "++++ attempting to write to closed socket" );
- }
- out.flush();
- }
-
- /**
- * writes a byte array to the output stream
- *
- * @param b byte array to write
- * @throws IOException if an io error happens
- */
- void write( byte[] b ) throws IOException {
- if ( sock == null || !sock.isConnected() ) {
- log.error( "++++ attempting to write to closed socket" );
- throw new IOException( "++++ attempting to write to closed socket" );
- }
- out.write( b );
- }
-
- /**
- * use the socket's hashcode for this object
- * so we can key off of SockIOs
- *
- * @return int hashcode
- */
- public int hashCode() {
- return ( sock == null ) ? 0 : sock.hashCode();
- }
-
- /**
- * returns the string representation of this socket
- *
- * @return string
- */
- public String toString() {
- return ( sock == null ) ? "" : sock.toString();
- }
-
- /**
- * Hack to reap any leaking children.
- */
- protected void finalize() throws Throwable {
- try {
- if ( sock != null ) {
- log.error( "++++ closing potentially leaked socket in finalize" );
- sock.close();
- sock = null;
- }
- }
- catch ( Throwable t ) {
- log.error( t.getMessage(), t );
- }
- finally {
- super.finalize();
- }
- }
- }
-}
diff --git a/src/com/meetup/memcached/test/TestMemcached.java b/src/com/meetup/memcached/test/TestMemcached.java
deleted file mode 100644
index b0a9bae..0000000
--- a/src/com/meetup/memcached/test/TestMemcached.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Copyright (c) 2008 Greg Whalin
- * All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the BSD license
- *
- * This library is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.
- *
- * You should have received a copy of the BSD License along with this
- * library.
- *
- * @author greg whalin
- */
-package com.meetup.memcached.test;
-
-import com.meetup.memcached.*;
-import org.apache.log4j.*;
-
-public class TestMemcached {
- public static void main(String[] args) {
- // memcached should be running on the first server below but NOT on the second (to exercise failover)
-
- BasicConfigurator.configure();
- String[] servers = { "192.168.1.1:1624", "192.168.1.1:1625" };
- SockIOPool pool = SockIOPool.getInstance();
- pool.setServers( servers );
- pool.setFailover( true );
- pool.setInitConn( 10 );
- pool.setMinConn( 5 );
- pool.setMaxConn( 250 );
- pool.setMaintSleep( 30 );
- pool.setNagle( false );
- pool.setSocketTO( 3000 );
- pool.setAliveCheck( true );
- pool.initialize();
-
- MemcachedClient mcc = new MemcachedClient();
-
- // turn off most memcached client logging:
- com.meetup.memcached.Logger.getLogger( MemcachedClient.class.getName() ).setLevel( com.meetup.memcached.Logger.LEVEL_WARN );
-
- for ( int i = 0; i < 10; i++ ) {
- boolean success = mcc.set( "" + i, "Hello!" );
- String result = (String)mcc.get( "" + i );
- System.out.println( String.format( "set( %d ): %s", i, success ) );
- System.out.println( String.format( "get( %d ): %s", i, result ) );
- }
-
- System.out.println( "\n\t -- sleeping --\n" );
- try { Thread.sleep( 10000 ); } catch ( Exception ex ) { }
-
- for ( int i = 0; i < 10; i++ ) {
- boolean success = mcc.set( "" + i, "Hello!" );
- String result = (String)mcc.get( "" + i );
- System.out.println( String.format( "set( %d ): %s", i, success ) );
- System.out.println( String.format( "get( %d ): %s", i, result ) );
- }
- }
-}
diff --git a/src/com/meetup/memcached/test/UnitTests.java b/src/com/meetup/memcached/test/UnitTests.java
deleted file mode 100644
index f9bc089..0000000
--- a/src/com/meetup/memcached/test/UnitTests.java
+++ /dev/null
@@ -1,403 +0,0 @@
-/**
- * Copyright (c) 2008 Greg Whalin
- * All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the BSD license
- *
- * This library is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
- * PURPOSE.
- *
- * You should have received a copy of the BSD License along with this
- * library.
- *
- * @author Kevin Burton
- * @author greg whalin
- */
-package com.meetup.memcached.test;
-
-import com.meetup.memcached.*;
-import java.util.*;
-import java.io.Serializable;
-
-import org.apache.log4j.Level;
-import org.apache.log4j.Logger;
-import org.apache.log4j.BasicConfigurator;
-
-public class UnitTests {
-
- // logger
- private static Logger log =
- Logger.getLogger( UnitTests.class.getName() );
-
- public static MemcachedClient mc = null;
-
- public static void test1() {
- mc.set( "foo", Boolean.TRUE );
- Boolean b = (Boolean)mc.get( "foo" );
- assert b.booleanValue();
- log.error( "+ store/retrieve Boolean type test passed" );
- }
-
- public static void test2() {
- mc.set( "foo", new Integer( Integer.MAX_VALUE ) );
- Integer i = (Integer)mc.get( "foo" );
- assert i.intValue() == Integer.MAX_VALUE;
- log.error( "+ store/retrieve Integer type test passed" );
- }
-
- public static void test3() {
- String input = "test of string encoding";
- mc.set( "foo", input );
- String s = (String)mc.get( "foo" );
- assert s.equals( input );
- log.error( "+ store/retrieve String type test passed" );
- }
-
- public static void test4() {
- mc.set( "foo", new Character( 'z' ) );
- Character c = (Character)mc.get( "foo" );
- assert c.charValue() == 'z';
- log.error( "+ store/retrieve Character type test passed" );
- }
-
- public static void test5() {
- mc.set( "foo", new Byte( (byte)127 ) );
- Byte b = (Byte)mc.get( "foo" );
- assert b.byteValue() == 127;
- log.error( "+ store/retrieve Byte type test passed" );
- }
-
- public static void test6() {
- mc.set( "foo", new StringBuffer( "hello" ) );
- StringBuffer o = (StringBuffer)mc.get( "foo" );
- assert o.toString().equals( "hello" );
- log.error( "+ store/retrieve StringBuffer type test passed" );
- }
-
- public static void test7() {
- mc.set( "foo", new Short( (short)100 ) );
- Short o = (Short)mc.get( "foo" );
- assert o.shortValue() == 100;
- log.error( "+ store/retrieve Short type test passed" );
- }
-
- public static void test8() {
- mc.set( "foo", new Long( Long.MAX_VALUE ) );
- Long o = (Long)mc.get( "foo" );
- assert o.longValue() == Long.MAX_VALUE;
- log.error( "+ store/retrieve Long type test passed" );
- }
-
- public static void test9() {
- mc.set( "foo", new Double( 1.1 ) );
- Double o = (Double)mc.get( "foo" );
- assert o.doubleValue() == 1.1;
- log.error( "+ store/retrieve Double type test passed" );
- }
-
- public static void test10() {
- mc.set( "foo", new Float( 1.1f ) );
- Float o = (Float)mc.get( "foo" );
- assert o.floatValue() == 1.1f;
- log.error( "+ store/retrieve Float type test passed" );
- }
-
- public static void test11() {
- mc.set( "foo", new Integer( 100 ), new Date( System.currentTimeMillis() ));
- try { Thread.sleep( 1000 ); } catch ( Exception ex ) { }
- assert mc.get( "foo" ) == null;
- log.error( "+ store/retrieve w/ expiration test passed" );
- }
-
- public static void test12() {
- long i = 0;
- mc.storeCounter("foo", i);
- mc.incr("foo"); // foo now == 1
- mc.incr("foo", (long)5); // foo now == 6
- long j = mc.decr("foo", (long)2); // foo now == 4
- assert j == 4;
- assert j == mc.getCounter( "foo" );
- log.error( "+ incr/decr test passed" );
- }
-
- public static void test13() {
- Date d1 = new Date();
- mc.set("foo", d1);
- Date d2 = (Date) mc.get("foo");
- assert d1.equals( d2 );
- log.error( "+ store/retrieve Date type test passed" );
- }
-
- public static void test14() {
- assert !mc.keyExists( "foobar123" );
- mc.set( "foobar123", new Integer( 100000) );
- assert mc.keyExists( "foobar123" );
- log.error( "+ store/retrieve test passed" );
-
- assert !mc.keyExists( "counterTest123" );
- mc.storeCounter( "counterTest123", 0 );
- assert mc.keyExists( "counterTest123" );
- log.error( "+ counter store test passed" );
- }
-
- public static void test15() {
-
- Map stats = mc.statsItems();
- assert stats != null;
-
- stats = mc.statsSlabs();
- assert stats != null;
-
- log.error( "+ stats test passed" );
- }
-
- public static void test16() {
- assert !mc.set( "foo", null );
- log.error( "+ invalid data store [null] test passed" );
- }
-
- public static void test17() {
- mc.set( "foo bar", Boolean.TRUE );
- Boolean b = (Boolean)mc.get( "foo bar" );
- assert b.booleanValue();
- log.error( "+ store/retrieve Boolean type test passed" );
- }
-
- public static void test18() {
- long i = 0;
- mc.addOrIncr( "foo" ); // foo now == 0
- mc.incr( "foo" ); // foo now == 1
- mc.incr( "foo", (long)5 ); // foo now == 6
-
- mc.addOrIncr( "foo" ); // foo now 7
-
- long j = mc.decr( "foo", (long)3 ); // foo now == 4
- assert j == 4;
- assert j == mc.getCounter( "foo" );
-
- log.error( "+ incr/decr test passed" );
- }
-
- public static void test19() {
- int max = 100;
- String[] keys = new String[ max ];
- for ( int i = 0; i < max; i++ ) {
- keys[ i ] = "key" + i;
- }
-
- // store values for only some of the keys, then fetch them all in one getMulti() call
- String[] allKeys = keys;
- String[] setKeys = new String[ max / 2 ];
- for ( int i = 0; i < setKeys.length; i++ ) {
- setKeys[ i ] = keys[ i ];
- mc.set( setKeys[ i ], setKeys[ i ] );
- }
-
- Map<String,Object> results = mc.getMulti( allKeys );
-
- assert allKeys.length == results.size();
- for ( String key : setKeys ) {
- String val = (String)results.get( key );
- assert key.equals( val );
- }
-
- log.error( "+ getMulti w/ keys that don't exist test passed" );
- }
-
- public static void runAlTests( MemcachedClient mc ) {
- test14();
- for ( int t = 0; t < 2; t++ ) {
- mc.setCompressEnable( ( t&1 ) == 1 );
-
- test1();
- test2();
- test3();
- test4();
- test5();
- test6();
- test7();
- test8();
- test9();
- test10();
- test11();
- test12();
- test13();
- test15();
- test16();
- test17();
- test21();
- test22();
- test23();
- test24();
-
- for ( int i = 0; i < 3; i++ )
- test19();
-
- test20( 8191, 1, 0 );
- test20( 8192, 1, 0 );
- test20( 8193, 1, 0 );
-
- test20( 16384, 100, 0 );
- test20( 17000, 128, 0 );
-
- test20( 128*1024, 1023, 0 );
- test20( 128*1024, 1023, 1 );
- test20( 128*1024, 1024, 0 );
- test20( 128*1024, 1024, 1 );
-
- test20( 128*1024, 1023, 0 );
- test20( 128*1024, 1023, 1 );
- test20( 128*1024, 1024, 0 );
- test20( 128*1024, 1024, 1 );
-
- test20( 900*1024, 32*1024, 0 );
- test20( 900*1024, 32*1024, 1 );
- }
-
- }
-
- /**
- * This runs through some simple tests of the MemcacheClient.
- *
- * Command line args:
- * args[0] = number of threads to spawn
- * args[1] = number of runs per thread
- * args[2] = size of object to store
- *
- * @param args the command line arguments
- */
- public static void main(String[] args) {
-
- BasicConfigurator.configure();
- org.apache.log4j.Logger.getRootLogger().setLevel( Level.WARN );
-
- if ( !UnitTests.class.desiredAssertionStatus() ) {
- System.err.println( "WARNING: assertions are disabled!" );
- try { Thread.sleep( 3000 ); } catch ( InterruptedException e ) {}
- }
-
- String[] serverlist = {
- "192.168.1.50:1620",
- "192.168.1.50:1621",
- "192.168.1.50:1622",
- "192.168.1.50:1623",
- "192.168.1.50:1624",
- "192.168.1.50:1625",
- "192.168.1.50:1626",
- "192.168.1.50:1627",
- "192.168.1.50:1628",
- "192.168.1.50:1629"
- };
-
- Integer[] weights = { 1, 1, 1, 1, 10, 5, 1, 1, 1, 3 };
-
- if ( args.length > 0 )
- serverlist = args;
-
- // initialize the pool for memcache servers
- SockIOPool pool = SockIOPool.getInstance( "test" );
- pool.setServers( serverlist );
- pool.setWeights( weights );
- pool.setMaxConn( 250 );
- pool.setNagle( false );
- pool.setHashingAlg( SockIOPool.CONSISTENT_HASH );
- pool.initialize();
-
- mc = new MemcachedClient( "test" );
- runAlTests( mc );
- }
-
- /**
- * Class for testing serializing of objects.
- *
- * @author $Author: $
- * @version $Revision: $ $Date: $
- */
- public static final class TestClass implements Serializable {
-
- private String field1;
- private String field2;
- private Integer field3;
-
- public TestClass( String field1, String field2, Integer field3 ) {
- this.field1 = field1;
- this.field2 = field2;
- this.field3 = field3;
- }
-
- public String getField1() { return this.field1; }
- public String getField2() { return this.field2; }
- public Integer getField3() { return this.field3; }
-
- public boolean equals( Object o ) {
- if ( this == o ) return true;
- if ( !( o instanceof TestClass ) ) return false;
-
- TestClass obj = (TestClass)o;
-
- return ( ( this.field1 == obj.getField1() || ( this.field1 != null && this.field1.equals( obj.getField1() ) ) )
- && ( this.field2 == obj.getField2() || ( this.field2 != null && this.field2.equals( obj.getField2() ) ) )
- && ( this.field3 == obj.getField3() || ( this.field3 != null && this.field3.equals( obj.getField3() ) ) ) );
- }
- }
-}
diff --git a/src/main/java/com/schooner/MemCached/AbstractTransCoder.java b/src/main/java/com/schooner/MemCached/AbstractTransCoder.java
new file mode 100644
index 0000000..bfa2282
--- /dev/null
+++ b/src/main/java/com/schooner/MemCached/AbstractTransCoder.java
@@ -0,0 +1,69 @@
+/*******************************************************************************
+ * Copyright (c) 2009 Schooner Information Technology, Inc.
+ * All rights reserved.
+ *
+ * http://www.schoonerinfotech.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************/
+package com.schooner.MemCached;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * {@link AbstractTransCoder} is nearly the same as the interface
+ * {@link TransCoder}; the only difference is that subclasses need not return
+ * the number of bytes written for the memcached set operation.
+ *
+ * @author Xingen Wang
+ * @since 2.5.0
+ * @see TransCoder
+ * @see ObjectTransCoder
+ */
+public abstract class AbstractTransCoder implements TransCoder {
+
+ /*
+ * (non-Javadoc)
+ *
+ * @see com.schooner.MemCached.TransCoder#encode(com.schooner.MemCached.
+ * SockOutputStream, java.lang.Object)
+ */
+ public int encode(SockOutputStream out, Object object) throws IOException {
+ out.resetCount();
+ encode((OutputStream) out, object);
+ return out.getCount();
+ }
+
+ /**
+ * encode the java object into outputstream.
+ *
+ * @param out
+ * outputstream to hold the data.
+ * @param object
+ * object to be encoded.
+ * @throws IOException
+ */
+ public abstract void encode(OutputStream out, Object object) throws IOException;
+
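+ /*
+  * A minimal sketch of a concrete subclass (the class name is hypothetical, and the
+  * decode() half of the TransCoder contract is omitted here):
+  *
+  *   public class JavaSerializationTransCoder extends AbstractTransCoder {
+  *       public void encode(OutputStream out, Object object) throws IOException {
+  *           new ObjectOutputStream(out).writeObject(object);
+  *       }
+  *   }
+  */
+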
+}
diff --git a/src/main/java/com/schooner/MemCached/AscIIClient.java b/src/main/java/com/schooner/MemCached/AscIIClient.java
new file mode 100755
index 0000000..724e2f4
--- /dev/null
+++ b/src/main/java/com/schooner/MemCached/AscIIClient.java
@@ -0,0 +1,1763 @@
+/*******************************************************************************
+ * Copyright (c) 2009 Schooner Information Technology, Inc.
+ * All rights reserved.
+ *
+ * http://www.schoonerinfotech.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************/
+package com.schooner.MemCached;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.nio.ByteBuffer;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+import java.nio.channels.SocketChannel;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.zip.GZIPInputStream;
+
+import com.whalin.MemCached.ErrorHandler;
+import com.whalin.MemCached.LineInputStream;
+import com.whalin.MemCached.MemCachedClient;
+
+/**
+ * This client implements the text protocol of memcached in a very high
+ * performance way.
+ *
+ * Please use the wrapper class {@link MemCachedClient} for accessing the
+ * memcached server.
+ *
+ * @author Xingen Wang
+ * @since 2.5.0
+ * @see BinaryClient
+ */
+public class AscIIClient extends MemCachedClient {
+ private TransCoder transCoder = new ObjectTransCoder();
+
+ // pool instance
+ private SchoonerSockIOPool pool;
+
+ // which pool to use
+ private String poolName;
+
+ // flags
+ private boolean sanitizeKeys;
+ private boolean primitiveAsString;
+ @SuppressWarnings("unused")
+ private boolean compressEnable;
+ @SuppressWarnings("unused")
+ private long compressThreshold;
+ private String defaultEncoding;
+
+ public boolean isUseBinaryProtocol() {
+ return false;
+ }
+
+ /**
+ * Creates a new instance of MemCachedClient.
+ */
+ public AscIIClient() {
+ this(null);
+ }
+
+ /**
+ * Creates a new instance of MemCachedClient accepting a passed in pool
+ * name.
+ *
+ * @param poolName
+ * name of SockIOPool
+ */
+ public AscIIClient(String poolName) {
+ super((MemCachedClient) null);
+ this.poolName = poolName;
+ init();
+ }
+
+ public AscIIClient(String poolName, ClassLoader cl, ErrorHandler eh) {
+ super((MemCachedClient) null);
+ this.poolName = poolName;
+ this.classLoader = cl;
+ this.errorHandler = eh;
+ init();
+ }
+
+ /**
+ * Initializes client object to defaults.
+ *
+ * This disables compression and leaves the compression threshold at its default.
+ */
+ private void init() {
+ this.sanitizeKeys = true;
+ this.primitiveAsString = false;
+ this.compressEnable = false;
+ this.compressThreshold = COMPRESS_THRESH;
+ this.defaultEncoding = "UTF-8";
+ this.poolName = (this.poolName == null) ? "default" : this.poolName;
+
+ // get a pool instance to work with for the life of this instance
+ this.pool = SchoonerSockIOPool.getInstance(poolName);
+ }
+
+ public boolean keyExists(String key) {
+ return (this.get(key, null) != null);
+ }
+
+ public boolean delete(String key) {
+ return delete(key, null, null);
+ }
+
+ public boolean delete(String key, Date expiry) {
+ return delete(key, null, expiry);
+ }
+
+ public boolean delete(String key, Integer hashCode, Date expiry) {
+
+ if (key == null) {
+ log.error("null value for key passed to delete()");
+ return false;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return false;
+ }
+
+ // get SockIO obj from hash or from key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ // return false if unable to get SockIO obj
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, new IOException("no socket to server available"), key);
+ return false;
+ }
+
+ // build command
+ StringBuilder command = new StringBuilder("delete ").append(key);
+ if (expiry != null)
+ command.append(" " + expiry.getTime() / 1000);
+
+ command.append("\r\n");
+
+ try {
+ sock.write(command.toString().getBytes());
+
+ // if we get appropriate response back, then we return true
+ // get result code
+ SockInputStream sis = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line = sis.getLine();
+ sis.close();
+ if (DELETED.equals(line)) { // successful
+ log.debug(new StringBuffer().append("++++ deletion of key: ").append(key)
+ .append(" from cache was a success").toString());
+ return true;
+ } else if (NOTFOUND.equals(line)) { // key not found
+ log.debug(new StringBuffer().append("++++ deletion of key: ").append(key)
+ .append(" from cache failed as the key was not found").toString());
+ } else { // other error information
+ if (log.isErrorEnabled()) {
+ log.error(new StringBuffer().append("++++ error deleting key: ").append(key).toString());
+ log.error(new StringBuffer().append("++++ server response: ").append(line).toString());
+ }
+ }
+ } catch (IOException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on delete");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
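+ /*
+  * On the wire, the method above issues the standard memcached text command (key is
+  * illustrative):
+  *
+  *   client: delete mykey\r\n
+  *   server: DELETED\r\n      (or NOT_FOUND\r\n if the key does not exist)
+  */
+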
+ public boolean set(String key, Object value) {
+ return set("set", key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Integer hashCode) {
+ return set("set", key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Date expiry) {
+ return set("set", key, value, expiry, null, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Date expiry, Integer hashCode) {
+ return set("set", key, value, expiry, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Date expiry, Integer hashCode, boolean asString) {
+ return set("set", key, value, expiry, hashCode, 0L, asString);
+ }
+
+ public boolean add(String key, Object value) {
+ return set("add", key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean add(String key, Object value, Integer hashCode) {
+ return set("add", key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean add(String key, Object value, Date expiry) {
+ return set("add", key, value, expiry, null, 0L, primitiveAsString);
+ }
+
+ public boolean add(String key, Object value, Date expiry, Integer hashCode) {
+ return set("add", key, value, expiry, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean append(String key, Object value, Integer hashCode) {
+ return set("append", key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean append(String key, Object value) {
+ return set("append", key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, Integer hashCode, long casUnique) {
+ return set("cas", key, value, null, hashCode, casUnique, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, Date expiry, long casUnique) {
+ return set("cas", key, value, expiry, null, casUnique, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, Date expiry, Integer hashCode, long casUnique) {
+ return set("cas", key, value, expiry, hashCode, casUnique, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, long casUnique) {
+ return set("cas", key, value, null, null, casUnique, primitiveAsString);
+ }
+
+ public boolean prepend(String key, Object value, Integer hashCode) {
+ return set("prepend", key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean prepend(String key, Object value) {
+ return set("prepend", key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value) {
+ return set("replace", key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value, Integer hashCode) {
+ return set("replace", key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value, Date expiry) {
+ return set("replace", key, value, expiry, null, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value, Date expiry, Integer hashCode) {
+ return set("replace", key, value, expiry, hashCode, 0L, primitiveAsString);
+ }
+
+ /**
+ * Stores data to cache.
+ *
+ * For the "add" command the value is stored only if no data exists for the
+ * key; for "replace" it is stored only if data already exists. The server
+ * will automatically delete the value when the expiration time has been
+ * reached.
+ *
+ * If compression is enabled, and the data is longer than the compression
+ * threshold, the data will be stored in compressed form.
+ *
+ * Primitive values may be stored natively or as strings; other objects are
+ * serialized via the configured TransCoder (Java serialization by default).
+ *
+ * @param cmdname
+ * action to take (set, add, replace)
+ * @param key
+ * key to store cache under
+ * @param value
+ * object to cache
+ * @param expiry
+ * expiration
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @param asString
+ * if true, then store all primitives as their string value
+ * @return true/false indicating success
+ */
+ private boolean set(String cmdname, String key, Object value, Date expiry, Integer hashCode, Long casUnique,
+ boolean asString) {
+
+ if (cmdname == null || key == null) {
+ log.error("key is null or cmd is null/empty for set()");
+ return false;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, e, key);
+ log.error("failed to sanitize your key!", e);
+ return false;
+ }
+
+ if (value == null) {
+ log.error("trying to store a null value to cache");
+ return false;
+ }
+
+ // get SockIO obj
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, new IOException("no socket to server available"), key);
+ return false;
+ }
+
+ if (expiry == null)
+ expiry = new Date(0);
+
+ // store flags
+ int flags = asString ? MemCachedClient.MARKER_STRING : NativeHandler.getMarkerFlag(value);
+ // construct the command
+ String cmd = new StringBuffer().append(cmdname).append(" ").append(key).append(" ").append(flags).append(" ")
+ .append(expiry.getTime() / 1000).append(" ").toString();
+
+ try {
+ sock.writeBuf.clear();
+ sock.writeBuf.put(cmd.getBytes());
+ int offset = sock.writeBuf.position();
+ // write blank bytes size.
+ sock.writeBuf.put(BLAND_DATA_SIZE);
+ if (casUnique != 0)
+ sock.writeBuf.put((" " + casUnique.toString()).getBytes());
+ sock.writeBuf.put(B_RETURN);
+ SockOutputStream output = new SockOutputStream(sock);
+ int valLen = 0;
+ if (flags != MARKER_OTHERS) {
+ byte[] b;
+ if (asString) {
+ b = value.toString().getBytes(defaultEncoding);
+ } else {
+ /*
+ * Using NativeHandler to serialize the value
+ */
+ b = NativeHandler.encode(value);
+ }
+ output.write(b);
+ valLen = b.length;
+ } else {
+ /*
+ * Using default object transcoder to serialize the
+ * non-primitive values.
+ */
+ valLen = transCoder.encode(output, value);
+ }
+ sock.writeBuf.put(B_RETURN);
+ // write serialized object
+ byte[] objectSize = new Integer(valLen).toString().getBytes();
+ int oldPosition = sock.writeBuf.position();
+ sock.writeBuf.position(offset);
+ // put real object bytes size
+ sock.writeBuf.put(objectSize);
+ // return to correct position.
+ sock.writeBuf.position(oldPosition);
+
+ // write the buffer to server
+ // now write the data to the cache server
+ sock.flush();
+ // get result code
+ SockInputStream sis = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line = sis.getLine();
+ sis.close();
+ if (STORED.equals(line)) {
+ /*
+ * Successfully set here.
+ */
+ return true;
+ }
+ } catch (Exception e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on set");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
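+ /*
+  * The storage commands built above follow the memcached text protocol
+  * "<cmd> <key> <flags> <exptime> <bytes>[ <cas unique>]\r\n<data>\r\n"; for example
+  * (key, flags and payload are illustrative):
+  *
+  *   client: set mykey 0 0 5\r\nhello\r\n
+  *   server: STORED\r\n
+  */
+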
+ public long addOrIncr(String key) {
+ return addOrIncr(key, 0, null);
+ }
+
+ public long addOrIncr(String key, long inc) {
+ return addOrIncr(key, inc, null);
+ }
+
+ public long addOrIncr(String key, long inc, Integer hashCode) {
+ boolean ret = add(key, "" + inc, hashCode);
+
+ if (ret) {
+ return inc;
+ } else {
+ return incrdecr("incr", key, inc, hashCode);
+ }
+ }
+
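+ /*
+  * addOrIncr() first tries to "add" the key with the given amount; if the add fails
+  * because the counter already exists, it falls back to "incr". For example
+  * (illustrative values): with no existing key, addOrIncr("hits", 5) stores 5 and
+  * returns 5; called again with the same arguments it returns the incremented value 10.
+  */
+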
+ public long addOrDecr(String key) {
+ return addOrDecr(key, 0, null);
+ }
+
+ public long addOrDecr(String key, long inc) {
+ return addOrDecr(key, inc, null);
+ }
+
+ public long addOrDecr(String key, long inc, Integer hashCode) {
+ boolean ret = add(key, "" + inc, hashCode);
+ if (ret) {
+ return inc;
+ } else {
+ return incrdecr("decr", key, inc, hashCode);
+ }
+ }
+
+ public long incr(String key) {
+ return incrdecr("incr", key, 1, null);
+ }
+
+ public long incr(String key, long inc) {
+ return incrdecr("incr", key, inc, null);
+ }
+
+ public long incr(String key, long inc, Integer hashCode) {
+ return incrdecr("incr", key, inc, hashCode);
+ }
+
+ public long decr(String key) {
+ return incrdecr("decr", key, 1, null);
+ }
+
+ public long decr(String key, long inc) {
+ return incrdecr("decr", key, inc, null);
+ }
+
+ public long decr(String key, long inc, Integer hashCode) {
+ return incrdecr("decr", key, inc, hashCode);
+ }
+
+ /**
+ * Increments/decrements the value at the specified key by inc.
+ *
+ * Note that the server uses a 32-bit unsigned integer, and checks for
+ * underflow. In the event of underflow, the result will be zero. Because
+ * Java lacks unsigned types, the value is returned as a 64-bit integer.
+ * The server will only decrement a value if it already exists;
+ * if a value is not found, -1 will be returned.
+ *
+ * @param cmdname
+ * increment/decrement
+ * @param key
+ * cache key
+ * @param inc
+ * amount to incr or decr
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @return new value or -1 if not exist
+ */
+ private long incrdecr(String cmdname, String key, long inc, Integer hashCode) {
+
+ if (key == null) {
+ log.error("null key for incrdecr()");
+ return -1;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+ log.error("failed to sanitize your key!", e);
+ return -1;
+ }
+
+ // get SockIO obj for given cache key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, new IOException("no socket to server available"), key);
+ return -1;
+ }
+
+ try {
+ String cmd = new StringBuffer().append(cmdname).append(" ").append(key).append(" ").append(inc)
+ .append("\r\n").toString();
+ sock.write(cmd.getBytes());
+ // get result code
+ SockInputStream sis = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line = sis.getLine().split("\r\n")[0];
+ sis.close();
+ if (line.matches("\\d+")) {
+ // successfully incremented/decremented
+ // return sock to pool and return result
+ return Long.parseLong(line);
+ } else if (NOTFOUND.equals(line + "\r\n")) {
+ log.info(new StringBuffer().append("++++ key not found to incr/decr for key: ").append(key).toString());
+ } else {
+ if (log.isErrorEnabled()) {
+ log.error(new StringBuffer().append("++++ error incr/decr key: ").append(key).toString());
+ log.error(new StringBuffer().append("++++ server response: ").append(line).toString());
+ }
+ }
+ } catch (Exception e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on incr/decr");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return -1;
+ }
+
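+ /*
+  * Wire-level example for the command built above (key and counter value illustrative):
+  *
+  *   client: incr hits 5\r\n
+  *   server: 10\r\n            (or NOT_FOUND\r\n, in which case -1 is returned)
+  */
+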
+ public Object get(String key) {
+ return get(key, null);
+ }
+
+ public Object get(String key, Integer hashCode) {
+ return get("get", key, hashCode, false);
+ }
+
+ public MemcachedItem gets(String key) {
+ return gets(key, null);
+ }
+
+ public MemcachedItem gets(String key, Integer hashCode) {
+ return gets("gets", key, hashCode, false);
+ }
+
+ /**
+ * Retrieve a key from the server, using a specific hash.
+ *
+ * If the data was compressed or serialized when compressed, it will
+ * automatically
+ * be decompressed or serialized, as appropriate. (Inclusive or)
+ *
+ * Non-serialized data will be returned as a string, so explicit conversion
+ * to
+ * numeric types will be necessary, if desired
+ *
+ * @param key
+ * key where data is stored
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @param asString
+ * if true, then return string val
+ * @return the object that was previously stored, or null if it was not
+ * previously stored
+ */
+ public Object get(String key, Integer hashCode, boolean asString) {
+ return get("get", key, hashCode, asString);
+ }
+
+ /**
+ * Retrieves the stored value for the given key from the server.
+ *
+ * @param cmd
+ * cmd to be used, get/gets
+ * @param key
+ * specified key
+ * @param hashCode
+ * specified hashcode
+ * @return the stored object, or null if the key was not found
+ */
+ private Object get(String cmd, String key, Integer hashCode, boolean asString) {
+
+ if (key == null) {
+ log.error("key is null for get()");
+ return null;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ log.error("failed to sanitize your key!", e);
+ return null;
+ }
+
+ // get SockIO obj using cache key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, new IOException("no socket to server available"), key);
+ return null;
+ }
+
+ String cmdLine = cmd + " " + key;
+
+ try {
+ sock.writeBuf.clear();
+ sock.writeBuf.put(cmdLine.getBytes());
+ sock.writeBuf.put(B_RETURN);
+ // write buffer to server
+ sock.flush();
+
+ int dataSize = 0;
+ int flag = 0;
+
+ // get result code
+ SockInputStream input = new SockInputStream(sock, Integer.MAX_VALUE);
+ // then parse the response metadata from the server,
+ // including key, flags and data size
+ boolean stop = false;
+ StringBuffer sb = new StringBuffer();
+ int b;
+ int index = 0;
+ while (!stop) {
+ /*
+ * Critical block to parse the response header.
+ */
+ b = input.read();
+ if (b == ' ' || b == '\r') {
+ switch (index) {
+ case 0:
+ if (END.startsWith(sb.toString()))
+ return null;
+ case 1:
+ break;
+ case 2:
+ flag = Integer.parseInt(sb.toString());
+ break;
+ case 3:
+ // get the data size
+ dataSize = Integer.parseInt(sb.toString());
+ break;
+ }
+ index++;
+ sb = new StringBuffer();
+ if (b == '\r') {
+ input.read();
+ stop = true;
+ }
+ continue;
+ }
+ sb.append((char) b);
+ }
+
+ Object o = null;
+ input.willRead(dataSize);
+ // we can only take out serialized objects
+ if (dataSize > 0) {
+ if (NativeHandler.isHandled(flag)) {
+ // decoding object
+ byte[] buf = input.getBuffer();
+ if ((flag & F_COMPRESSED) == F_COMPRESSED) {
+ GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buf));
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(buf.length);
+ int count;
+ byte[] tmp = new byte[2048];
+ while ((count = gzi.read(tmp)) != -1) {
+ bos.write(tmp, 0, count);
+ }
+ // store uncompressed back to buffer
+ buf = bos.toByteArray();
+ gzi.close();
+ }
+ if (primitiveAsString || asString) {
+ o = new String(buf, defaultEncoding);
+ } else
+ o = NativeHandler.decode(buf, flag);
+ } else if (transCoder != null) {
+ // decode object with default transcoder.
+ InputStream in = input;
+ if ((flag & F_COMPRESSED) == F_COMPRESSED)
+ in = new GZIPInputStream(in);
+ if (classLoader == null)
+ o = transCoder.decode(in);
+ else
+ o = ((ObjectTransCoder) transCoder).decode(in, classLoader);
+ }
+ }
+ input.willRead(Integer.MAX_VALUE);
+ // Skip "\r\n" after each data block for VALUE
+ input.getLine();
+ // Skip "END\r\n" after get
+ input.getLine();
+ return o;
+ } catch (Exception ce) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, ce, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while trying to get object from cache for key: " + key);
+ log.error(ce.getMessage(), ce);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return null;
+
+ }
+
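+ /*
+  * The header parsing above walks a standard retrieval response of the form
+  * (key, flags and payload illustrative):
+  *
+  *   client: get mykey\r\n
+  *   server: VALUE mykey 0 5\r\nhello\r\nEND\r\n
+  *
+  * Index 2 picks up the flags and index 3 the data size; a bare "END\r\n" means a
+  * miss and null is returned.
+  */
+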
+ public MemcachedItem gets(String cmd, String key, Integer hashCode, boolean asString) {
+
+ if (key == null) {
+ log.error("key is null for get()");
+ return null;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ log.error("failed to sanitize your key!", e);
+ return null;
+ }
+
+ // get SockIO obj using cache key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, new IOException("no socket to server available"), key);
+ return null;
+ }
+
+ String cmdLine = cmd + " " + key;
+
+ try {
+ sock.writeBuf.clear();
+ sock.writeBuf.put(cmdLine.getBytes());
+ sock.writeBuf.put(B_RETURN);
+ // write buffer to server
+ sock.flush();
+
+ int dataSize = 0;
+ int flag = 0;
+ MemcachedItem item = new MemcachedItem();
+
+ // get result code
+ SockInputStream input = new SockInputStream(sock, Integer.MAX_VALUE);
+ // then parse the response metadata from the server,
+ // including key, flags, data size and cas unique
+ boolean stop = false;
+ StringBuffer sb = new StringBuffer();
+ int b;
+ int index = 0;
+ while (!stop) {
+ /*
+ * Critical block to parse the response header.
+ */
+ b = input.read();
+ if (b == ' ' || b == '\r') {
+ switch (index) {
+ case 0:
+ if (END.startsWith(sb.toString()))
+ return null;
+ case 1:
+ break;
+ case 2:
+ flag = Integer.parseInt(sb.toString());
+ break;
+ case 3:
+ // get the data size
+ dataSize = Integer.parseInt(sb.toString());
+ break;
+ case 4:
+ if (cmd.equals("gets"))
+ item.casUnique = Long.parseLong(sb.toString());
+ break;
+ }
+ index++;
+ sb = new StringBuffer();
+ if (b == '\r') {
+ input.read();
+ stop = true;
+ }
+ continue;
+ }
+ sb.append((char) b);
+ }
+ Object o = null;
+ input.willRead(dataSize);
+ // we can only take out serialized objects
+ if (dataSize > 0) {
+ if (NativeHandler.isHandled(flag)) {
+ byte[] buf = input.getBuffer();
+ if ((flag & F_COMPRESSED) == F_COMPRESSED) {
+ GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buf));
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(buf.length);
+ int count;
+ byte[] tmp = new byte[2048];
+ while ((count = gzi.read(tmp)) != -1) {
+ bos.write(tmp, 0, count);
+ }
+ // store uncompressed back to buffer
+ buf = bos.toByteArray();
+ gzi.close();
+ }
+ if (primitiveAsString || asString) {
+ o = new String(buf, defaultEncoding);
+ } else
+ // decoding object
+ o = NativeHandler.decode(buf, flag);
+ } else if (transCoder != null) {
+ InputStream in = input;
+ if ((flag & F_COMPRESSED) == F_COMPRESSED)
+ in = new GZIPInputStream(in);
+ // decode object with default transcoder.
+ o = transCoder.decode(in);
+ }
+ }
+ item.value = o;
+ input.willRead(Integer.MAX_VALUE);
+ // Skip "\r\n" after each data block for VALUE
+ input.getLine();
+ // Skip "END\r\n" after get
+ input.getLine();
+ return item;
+
+ } catch (Exception ce) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, ce, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while trying to get object from cache for key: " + key);
+ log.error(ce.getMessage(), ce);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * set transcoder. TransCoder is used to customize the serialization and
+ * deserialization.
+ *
+ * @param transCoder
+ */
+ public void setTransCoder(TransCoder transCoder) {
+ this.transCoder = transCoder;
+ }
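+
+ /*
+ * Illustrative sketch: plugging a transcoder into an initialized client
+ * instance mc. ObjectTransCoder is the serialization-based implementation
+ * these clients use by default; any other TransCoder implementation can be
+ * supplied the same way.
+ *
+ * mc.setTransCoder(new ObjectTransCoder());
+ */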
+
+ public Object[] getMultiArray(String[] keys) {
+ return getMultiArray(keys, null);
+ }
+
+ public Object[] getMultiArray(String[] keys, Integer[] hashCodes) {
+
+ Map data = getMulti(keys, hashCodes);
+
+ if (data == null)
+ return null;
+
+ Object[] res = new Object[keys.length];
+ for (int i = 0; i < keys.length; i++) {
+ res[i] = data.get(keys[i]);
+ }
+
+ return res;
+ }
+
+ /**
+ * Retrieve multiple objects from the memcache.
+ *
+ * This is recommended over repeated calls to {@link #get(String) get()},
+ * since it
+ * is more efficient.
+ *
+ * @param keys
+ * String array of keys to retrieve
+ * @param hashCodes
+ * if not null, then the Integer array of hashCodes
+ * @param asString
+ * if true, retrieve string vals
+ * @return Object array ordered in same order as key array containing
+ * results
+ */
+ public Object[] getMultiArray(String[] keys, Integer[] hashCodes, boolean asString) {
+
+ Map data = getMulti(keys, hashCodes, asString);
+
+ if (data == null)
+ return null;
+
+ Object[] res = new Object[keys.length];
+ for (int i = 0; i < keys.length; i++) {
+ res[i] = data.get(keys[i]);
+ }
+
+ return res;
+ }
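+
+ /*
+ * Illustrative sketch, assuming mc is an initialized client: the result array
+ * is ordered like the key array, with null at the positions of cache misses.
+ *
+ * String[] keys = { "cacheKey1", "cacheKey2" };
+ * Object[] values = mc.getMultiArray(keys);
+ */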
+
+ public Map getMulti(String[] keys) {
+ return getMulti(keys, null);
+ }
+
+ public Map getMulti(String[] keys, Integer[] hashCodes) {
+ return getMulti(keys, hashCodes, false);
+ }
+
+ /**
+ * Retrieve multiple keys from the memcache.
+ *
+ * This is recommended over repeated calls to {@link #get(String) get()},
+ * since it
+ * is more efficient.
+ *
+ * @param keys
+ * keys to retrieve
+ * @param hashCodes
+ * if not null, then the Integer array of hashCodes
+ * @param asString
+ * if true then retrieve using String val
+ * @return a hashmap with an entry for each key found by the server; keys
+ * that are not found are not entered into the hashmap, and
+ * attempting to retrieve them from the hashmap returns null.
+ */
+ public Map getMulti(String[] keys, Integer[] hashCodes, boolean asString) {
+
+ if (keys == null || keys.length == 0) {
+ log.error("missing keys for getMulti()");
+ return null;
+ }
+
+ Map<String, StringBuilder> cmdMap = new HashMap<String, StringBuilder>();
+ String[] cleanKeys = new String[keys.length];
+ for (int i = 0; i < keys.length; ++i) {
+ String key = keys[i];
+ if (key == null) {
+ log.error("null key, so skipping");
+ continue;
+ }
+
+ Integer hash = null;
+ if (hashCodes != null && hashCodes.length > i)
+ hash = hashCodes[i];
+
+ cleanKeys[i] = key;
+ try {
+ cleanKeys[i] = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+ log.error("failed to sanitize your key!", e);
+ continue;
+ }
+
+ // get SockIO obj from cache key
+ SchoonerSockIO sock = pool.getSock(cleanKeys[i], hash);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, new IOException("no socket to server available"), key);
+ continue;
+ }
+
+ // store in map and list if not already
+ if (!cmdMap.containsKey(sock.getHost()))
+ cmdMap.put(sock.getHost(), new StringBuilder("get"));
+
+ cmdMap.get(sock.getHost()).append(" " + cleanKeys[i]);
+
+ // return to pool
+ sock.close();
+ }
+
+ log.debug("multi get socket count : " + cmdMap.size());
+
+ // now query memcache
+ Map<String, Object> ret = new HashMap<String, Object>(keys.length);
+
+ // now use new NIO implementation
+ (new NIOLoader(this)).doMulti(asString, cmdMap, keys, ret);
+
+ // fix the return array in case we had to rewrite any of the keys
+ for (int i = 0; i < keys.length; ++i) {
+
+ // if key!=cleanKey and result has cleankey
+ if (!keys[i].equals(cleanKeys[i]) && ret.containsKey(cleanKeys[i])) {
+ ret.put(keys[i], ret.get(cleanKeys[i]));
+ ret.remove(cleanKeys[i]);
+ }
+
+ // backfill missing keys w/ null value
+ // if (!ret.containsKey(keys[i]))
+ // ret.put(keys[i], null);
+ }
+
+ log.debug("++++ memcache: got back " + ret.size() + " results");
+ return ret;
+ }
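+
+ /*
+ * Illustrative sketch, assuming mc is an initialized client backed by a
+ * configured pool: keys the servers do not return are simply absent from
+ * the map, so a lookup for them yields null.
+ *
+ * Map results = mc.getMulti(new String[] { "cacheKey1", "cacheKey2" });
+ * Object first = results.get("cacheKey1"); // null if not cached
+ */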
+
+ /**
+ * This method loads the data from cache into a Map.
+ *
+ * Pass an input stream that is ready to deliver the server's response and a
+ * HashMap to store the results.
+ *
+ * @param input
+ * line-oriented stream positioned at the server's retrieval response
+ * @param hm
+ * hashmap to store data into
+ * @param asString
+ * if true, and if we are using NativeHandler, return string val
+ * @throws IOException
+ * if io exception happens while reading from socket
+ */
+ private void loadMulti(LineInputStream input, Map<String, Object> hm, boolean asString) throws IOException {
+
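+ // The stream holds a standard text-protocol retrieval response: zero or more
+ // "VALUE <key> <flags> <bytes>\r\n<data>\r\n" blocks terminated by an "END" line.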
+ while (true) {
+ String line = input.readLine();
+
+ if (line.startsWith(VALUE)) {
+ String[] info = line.split(" ");
+ String key = info[1];
+ int flag = Integer.parseInt(info[2]);
+ int length = Integer.parseInt(info[3]);
+
+ // read obj into buffer
+ byte[] buf = new byte[length];
+ input.read(buf);
+ input.clearEOL();
+
+ // ready object
+ Object o = null;
+ // we can only take out serialized objects
+ if ((flag & F_COMPRESSED) == F_COMPRESSED) {
+ GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buf));
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(buf.length);
+ int count;
+ byte[] tmp = new byte[2048];
+ while ((count = gzi.read(tmp)) != -1) {
+ bos.write(tmp, 0, count);
+ }
+ // store uncompressed back to buffer
+ buf = bos.toByteArray();
+ gzi.close();
+ }
+ if (flag != MARKER_OTHERS) {
+ if (primitiveAsString || asString) {
+ // pulling out string value
+ o = new String(buf, defaultEncoding);
+ } else {
+ // decoding object
+ try {
+ o = NativeHandler.decode(buf, flag);
+ } catch (Exception e) {
+ log.error("++++ Exception thrown while trying to deserialize for key: " + key + " -- "
+ + e.getMessage());
+ e.printStackTrace();
+ }
+ }
+ } else if (transCoder != null) {
+ o = transCoder.decode(new ByteArrayInputStream(buf));
+ }
+
+ // store the object into the cache
+ hm.put(key, o);
+ } else if (END.startsWith(line)) {
+ break;
+ }
+ }
+ }
+
+ public boolean flushAll() {
+ return flushAll(null);
+ }
+
+ public boolean flushAll(String[] servers) {
+
+ // get SockIOPool instance
+ // return false if unable to get SockIO obj
+ if (pool == null) {
+ log.error("++++ unable to get SockIOPool instance");
+ return false;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to flush");
+ return false;
+ }
+
+ boolean success = true;
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ log.error("++++ unable to get connection to : " + servers[i]);
+ success = false;
+ if (errorHandler != null)
+ errorHandler.handleErrorOnFlush(this, new IOException("no socket to server available"));
+ continue;
+ }
+
+ // build command
+ String command = "flush_all\r\n";
+
+ try {
+ sock.write(command.getBytes());
+ // if we get appropriate response back, then we return true
+ // get result code
+ SockInputStream sis = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line = sis.getLine();
+ sis.close();
+ success = success && OK.equals(line);
+ } catch (IOException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnFlush(this, e);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on flushAll");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ success = false;
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return success;
+ }
+
+ public Map<String, Map<String, String>> stats() {
+ return stats(null);
+ }
+
+ public Map<String, Map<String, String>> stats(String[] servers) {
+ return stats(servers, "stats\r\n", STATS);
+ }
+
+ public Map<String, Map<String, String>> statsItems() {
+ return statsItems(null);
+ }
+
+ public Map<String, Map<String, String>> statsItems(String[] servers) {
+ return stats(servers, "stats items\r\n", STATS);
+ }
+
+ public Map<String, Map<String, String>> statsSlabs() {
+ return statsSlabs(null);
+ }
+
+ public Map<String, Map<String, String>> statsSlabs(String[] servers) {
+ return stats(servers, "stats slabs\r\n", STATS);
+ }
+
+ public Map<String, Map<String, String>> statsCacheDump(int slabNumber, int limit) {
+ return statsCacheDump(null, slabNumber, limit);
+ }
+
+ public Map<String, Map<String, String>> statsCacheDump(String[] servers, int slabNumber, int limit) {
+ return stats(servers, String.format("stats cachedump %d %d\r\n", slabNumber, limit), ITEM);
+ }
+
+ private Map<String, Map<String, String>> stats(String[] servers, String command, String lineStart) {
+
+ if (command == null || command.trim().equals("")) {
+ log.error("++++ invalid / missing command for stats()");
+ return null;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to check stats");
+ return null;
+ }
+
+ // array of stats Maps
+ Map<String, Map<String, String>> statsMaps = new HashMap<String, Map<String, String>>();
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnStats(this, new IOException("no socket to server available"));
+ continue;
+ }
+
+ // build command
+ try {
+ sock.write(command.getBytes());
+
+ // map to hold key value pairs
+ Map<String, String> stats = new HashMap<String, String>();
+ // get result code
+ SockInputStream input = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line;
+ // loop over results
+ while ((line = input.getLine()) != null) {
+
+ if (line.startsWith(lineStart)) {
+ String[] info = line.split(" ", 3);
+ String key = info.length > 1 ? info[1] : null;
+ String value = info.length > 2 ? info[2] : null;
+ stats.put(key, value);
+ } else if (END.startsWith(line)) {
+ // finish when we get end from server
+ break;
+ } else if (line.startsWith(ERROR) || line.startsWith(CLIENT_ERROR) || line.startsWith(SERVER_ERROR)) {
+ if (log.isErrorEnabled()) {
+ log.error("++++ failed to query stats");
+ log.error("++++ server response: " + line);
+ }
+ break;
+ }
+
+ statsMaps.put(servers[i], stats);
+ }
+ input.close();
+ } catch (Exception e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnStats(this, e);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on stats");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString());
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return statsMaps;
+ }
+
+ protected final class NIOLoader {
+ protected Selector selector;
+ protected int numConns = 0;
+ protected AscIIClient mc;
+ protected Connection[] conns;
+
+ public NIOLoader(AscIIClient mc) {
+ this.mc = mc;
+ }
+
+ private final class Connection {
+
+ public List<ByteBuffer> incoming = new ArrayList<ByteBuffer>();
+ public ByteBuffer outgoing;
+ public SchoonerSockIO sock;
+ public SocketChannel channel;
+ private boolean isDone = false;
+
+ public Connection(SchoonerSockIO sock, StringBuilder request) throws IOException {
+ this.sock = sock;
+ outgoing = ByteBuffer.wrap(request.append("\r\n").toString().getBytes());
+
+ channel = (SocketChannel) sock.getChannel();
+ if (channel == null)
+ throw new IOException("dead connection to: " + sock.getHost());
+
+ channel.configureBlocking(false);
+ channel.register(selector, SelectionKey.OP_WRITE, this);
+ }
+
+ public void close() {
+ try {
+ if (isDone) {
+ channel.configureBlocking(true);
+ sock.close();
+ return;
+ }
+ } catch (IOException e) {
+ log.warn("++++ memcache: unexpected error closing normally", e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+ }
+
+ public boolean isDone() {
+ // if we know we're done, just say so
+ if (isDone)
+ return true;
+
+ // else find out the hard way
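+ // scan backwards from the tail of the most recent buffer, matching the
+ // bytes of the terminating marker (B_END) in reverse and crossing buffer
+ // boundaries when a buffer is exhausted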
+ int strPos = B_END.length - 1;
+
+ int bi = incoming.size() - 1;
+ while (bi >= 0 && strPos >= 0) {
+ ByteBuffer buf = incoming.get(bi);
+ int pos = buf.position() - 1;
+ while (pos >= 0 && strPos >= 0) {
+ if (buf.get(pos--) != B_END[strPos--])
+ return false;
+ }
+
+ bi--;
+ }
+
+ isDone = strPos < 0;
+ return isDone;
+ }
+
+ public ByteBuffer getBuffer() {
+ int last = incoming.size() - 1;
+ if (last >= 0 && incoming.get(last).hasRemaining()) {
+ return incoming.get(last);
+ } else {
+ ByteBuffer newBuf = ByteBuffer.allocate(8192);
+ incoming.add(newBuf);
+ return newBuf;
+ }
+ }
+
+ public String toString() {
+ return new StringBuffer().append("Connection to ").append(sock.getHost()).append(" with ")
+ .append(incoming.size()).append(" bufs; done is ").append(isDone).toString();
+ }
+ }
+
+ public void doMulti(boolean asString, Map<String, StringBuilder> sockKeys, String[] keys,
+ Map<String, Object> ret) {
+
+ long timeRemaining = 0;
+ try {
+ selector = Selector.open();
+
+ // get the sockets, flip them to non-blocking, and set up data
+ // structures
+ conns = new Connection[sockKeys.keySet().size()];
+ numConns = 0;
+ for (Iterator<String> i = sockKeys.keySet().iterator(); i.hasNext();) {
+ // get SockIO obj from hostname
+ String host = i.next();
+
+ SchoonerSockIO sock = pool.getConnection(host);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this.mc, new IOException("no socket to server available"),
+ keys);
+ return;
+ }
+
+ conns[numConns++] = new Connection(sock, sockKeys.get(host));
+ }
+
+ // the main select loop; ends when
+ // 1) we've received data from all the servers, or
+ // 2) we time out
+ long startTime = System.currentTimeMillis();
+
+ long timeout = pool.getMaxBusy();
+ timeRemaining = timeout;
+
+ while (numConns > 0 && timeRemaining > 0) {
+ int n = selector.select(Math.min(timeout, 5000));
+ if (n > 0) {
+ // we've got some activity; handle it
+ Iterator<SelectionKey> it = selector.selectedKeys().iterator();
+ while (it.hasNext()) {
+ SelectionKey key = it.next();
+ it.remove();
+ handleKey(key);
+ }
+ } else {
+ // timeout likely... better check
+ // TODO: This seems like a problem area that we need to
+ // figure out how to handle.
+ log.error("selector timed out waiting for activity");
+ }
+
+ timeRemaining = timeout - (System.currentTimeMillis() - startTime);
+ }
+ } catch (IOException e) {
+ return;
+ } finally {
+ log.debug("Disconnecting; numConns=" + numConns + " timeRemaining=" + timeRemaining);
+
+ // run through our conns and either return them to the pool
+ // or forcibly close them
+ try {
+ if (selector != null)
+ selector.close();
+ } catch (IOException ignoreMe) {
+ }
+
+ for (Connection c : conns) {
+ if (c != null)
+ c.close();
+ }
+ }
+
+ // Done! Build the list of results and return them. If we get
+ // here by a timeout, then some of the connections are probably
+ // not done. But we'll return what we've got...
+ for (Connection c : conns) {
+ try {
+ if (c.incoming.size() > 0 && c.isDone())
+ loadMulti(new ByteBufArrayInputStream(c.incoming), ret, asString);
+ } catch (Exception e) {
+ // shouldn't happen; we have all the data already
+ log.debug("Caught the aforementioned exception on " + c);
+ }
+ }
+ }
+
+ public void doMulti(Map<String, StringBuilder> sockKeys, String[] keys, Map<String, Object> ret) {
+ doMulti(false, sockKeys, keys, ret);
+ }
+
+ private void handleKey(SelectionKey key) throws IOException {
+ if (key.isReadable())
+ readResponse(key);
+ else if (key.isWritable())
+ writeRequest(key);
+ }
+
+ public void writeRequest(SelectionKey key) throws IOException {
+ ByteBuffer buf = ((Connection) key.attachment()).outgoing;
+ SocketChannel sc = (SocketChannel) key.channel();
+
+ if (buf.hasRemaining()) {
+ sc.write(buf);
+ }
+
+ if (!buf.hasRemaining()) {
+ key.interestOps(SelectionKey.OP_READ);
+ }
+ }
+
+ public void readResponse(SelectionKey key) throws IOException {
+ Connection conn = (Connection) key.attachment();
+ ByteBuffer buf = conn.getBuffer();
+ int count = conn.channel.read(buf);
+ if (count > 0) {
+ if (conn.isDone()) {
+ key.cancel();
+ numConns--;
+ return;
+ }
+ }
+ }
+ }
+
+ public boolean sync(String key, Integer hashCode) {
+ if (key == null) {
+ log.error("null value for key passed to sync()");
+ return false;
+ }
+
+ // get SockIO obj from hash or from key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ // return false if unable to get SockIO obj
+ if (sock == null) {
+ return false;
+ }
+
+ // build command
+ StringBuilder command = new StringBuilder("sync ").append(key);
+ command.append("\r\n");
+
+ try {
+ sock.write(command.toString().getBytes());
+
+ // if we get appropriate response back, then we return true
+ // get result code
+ SockInputStream sis = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line = sis.getLine();
+ sis.close();
+ if (SYNCED.equals(line)) {
+ log.info(new StringBuffer().append("++++ sync of key: ").append(key)
+ .append(" from cache was a success").toString());
+
+ // return sock to pool and bail here
+ return true;
+ } else if (NOTFOUND.equals(line)) {
+ log.info(new StringBuffer().append("++++ sync of key: ").append(key)
+ .append(" from cache failed as the key was not found").toString());
+ } else {
+ if (log.isErrorEnabled()) {
+ log.error(new StringBuffer().append("++++ error sync key: ").append(key).toString());
+ log.error(new StringBuffer().append("++++ server response: ").append(line).toString());
+ }
+ }
+ } catch (IOException e) {
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on delete");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
+ public boolean sync(String key) {
+ return sync(key, null);
+ }
+
+ public boolean syncAll() {
+ return syncAll(null);
+ }
+
+ public boolean syncAll(String[] servers) {
+
+ // get SockIOPool instance
+ // return false if unable to get SockIO obj
+ if (pool == null) {
+ log.error("++++ unable to get SockIOPool instance");
+ return false;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to sync");
+ return false;
+ }
+
+ boolean success = true;
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ log.error("++++ unable to get connection to : " + servers[i]);
+ success = false;
+ continue;
+ }
+
+ // build command
+ String command = "sync_all\r\n";
+
+ try {
+ sock.write(command.getBytes());
+ // if we get appropriate response back, then we return true
+ // get result code
+ SockInputStream sis = new SockInputStream(sock, Integer.MAX_VALUE);
+ String line = sis.getLine();
+ sis.close();
+ success = success && SYNCED.equals(line);
+ } catch (IOException e) {
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on flushAll");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ success = false;
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return success;
+ }
+
+ @Override
+ public void setDefaultEncoding(String defaultEncoding) {
+ this.defaultEncoding = defaultEncoding;
+ }
+
+ @Override
+ public void setPrimitiveAsString(boolean primitiveAsString) {
+ this.primitiveAsString = primitiveAsString;
+ }
+
+ @Override
+ public void setSanitizeKeys(boolean sanitizeKeys) {
+ this.sanitizeKeys = sanitizeKeys;
+ }
+
+ private String sanitizeKey(String key) throws UnsupportedEncodingException {
+ return (sanitizeKeys) ? URLEncoder.encode(key, "UTF-8") : key;
+ }
+
+}
diff --git a/src/main/java/com/schooner/MemCached/AscIIUDPClient.java b/src/main/java/com/schooner/MemCached/AscIIUDPClient.java
new file mode 100755
index 0000000..2c080cc
--- /dev/null
+++ b/src/main/java/com/schooner/MemCached/AscIIUDPClient.java
@@ -0,0 +1,1003 @@
+/*******************************************************************************
+ * Copyright (c) 2009 Schooner Information Technology, Inc.
+ * All rights reserved.
+ *
+ * http://www.schoonerinfotech.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************/
+package com.schooner.MemCached;
+
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+
+import com.schooner.MemCached.command.DeletionCommand;
+import com.schooner.MemCached.command.FlushAllCommand;
+import com.schooner.MemCached.command.IncrdecrCommand;
+import com.schooner.MemCached.command.RetrievalCommand;
+import com.schooner.MemCached.command.StatsCommand;
+import com.schooner.MemCached.command.StorageCommand;
+import com.schooner.MemCached.command.SyncAllCommand;
+import com.schooner.MemCached.command.SyncCommand;
+import com.whalin.MemCached.ErrorHandler;
+import com.whalin.MemCached.MemCachedClient;
+
+/**
+ * This client implements the UDP protocol of memcached in a very high
+ * performance way.
+ *
+ * Please use the wrapper class {@link MemCachedClient} for accessing the
+ * memcached server.
+ *
+ *
+ * When using the memcached UDP protocol, note that the data size is limited
+ * to about 64K by the datagram length limit of the UDP protocol.
+ *
+ *
+ * A UDP datagram length field specifies the length in bytes of the entire
+ * datagram: header and data. The minimum length is 8 bytes since that's the
+ * length of the header. The field size sets a theoretical limit of 65,535 bytes
+ * (8 byte header + 65,527 bytes of data) for a UDP datagram. The practical
+ * limit for the data length which is imposed by the underlying IPv4 protocol is
+ * 65,507 bytes (65,535 − 8 byte UDP header − 20 byte IP header).
+ *
+ * @author Xingen Wang
+ * @since 2.5.0
+ * @see BinaryClient
+ */
+public class AscIIUDPClient extends MemCachedClient {
+ private TransCoder transCoder = new ObjectTransCoder();
+
+ // pool instance
+ private SchoonerSockIOPool pool;
+
+ // which pool to use
+ private String poolName;
+
+ // flags
+ private boolean sanitizeKeys;
+ @SuppressWarnings("unused")
+ private boolean primitiveAsString;
+ @SuppressWarnings("unused")
+ private boolean compressEnable;
+ @SuppressWarnings("unused")
+ private long compressThreshold;
+ @SuppressWarnings("unused")
+ private String defaultEncoding = "utf-8";
+
+ public static final byte B_DELIMITER = 32;
+ public static final byte B_RETURN = (byte) 13;
+
+ public boolean isUseBinaryProtocol() {
+ return false;
+ }
+
+ /**
+ * Creates a new instance of MemCachedClient.
+ */
+ public AscIIUDPClient() {
+ this("default");
+ }
+
+ /**
+ * Creates a new instance of MemCachedClient accepting a passed in pool
+ * name.
+ *
+ * @param poolName
+ * name of SockIOPool
+ */
+ public AscIIUDPClient(String poolName) {
+ this.poolName = poolName;
+ init();
+ }
+
+ public AscIIUDPClient(String poolName, ClassLoader cl, ErrorHandler eh) {
+ this.poolName = poolName;
+ this.classLoader = cl;
+ this.errorHandler = eh;
+ init();
+ }
+
+ /**
+ * Initializes client object to defaults.
+ *
+ * This enables compression and sets the compression threshold to 15 KB.
+ */
+ private void init() {
+ this.sanitizeKeys = true;
+ this.primitiveAsString = false;
+ this.compressEnable = true;
+ this.compressThreshold = COMPRESS_THRESH;
+ this.defaultEncoding = "UTF-8";
+ this.poolName = (this.poolName == null) ? "default" : this.poolName;
+
+ // get a pool instance to work with for the life of this instance
+ this.pool = SchoonerSockIOPool.getInstance(poolName);
+ }
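+
+ /*
+ * Illustrative sketch of direct use (normally this class is reached through
+ * the MemCachedClient wrapper, as the class javadoc notes). The named pool
+ * must already be initialized, and stored values have to stay well under the
+ * ~64K UDP datagram limit described above.
+ *
+ * AscIIUDPClient udp = new AscIIUDPClient("default");
+ * udp.set("cacheKey1", "small value");
+ * Object value = udp.get("cacheKey1");
+ */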
+
+ public boolean set(String key, Object value) {
+ return set("set", key, value, null, null, 0L);
+ }
+
+ public boolean set(String key, Object value, Integer hashCode) {
+ return set("set", key, value, null, hashCode, 0L);
+ }
+
+ public boolean set(String key, Object value, Date expiry) {
+ return set("set", key, value, expiry, null, 0L);
+ }
+
+ public boolean set(String key, Object value, Date expiry, Integer hashCode) {
+ return set("set", key, value, expiry, hashCode, 0L);
+ }
+
+ public boolean set(String key, Object value, Date expiry, Integer hashCode, boolean asString) {
+ return set("set", key, value, expiry, hashCode, 0L);
+ }
+
+ public boolean add(String key, Object value) {
+ return set("add", key, value, null, null, 0L);
+ }
+
+ public boolean add(String key, Object value, Integer hashCode) {
+ return set("add", key, value, null, hashCode, 0L);
+ }
+
+ public boolean add(String key, Object value, Date expiry) {
+ return set("add", key, value, expiry, null, 0L);
+ }
+
+ public boolean add(String key, Object value, Date expiry, Integer hashCode) {
+ return set("add", key, value, expiry, hashCode, 0L);
+ }
+
+ public boolean append(String key, Object value, Integer hashCode) {
+ return set("append", key, value, null, hashCode, 0L);
+ }
+
+ public boolean append(String key, Object value) {
+ return set("append", key, value, null, null, 0L);
+ }
+
+ public boolean cas(String key, Object value, Integer hashCode, long casUnique) {
+ return set("cas", key, value, null, hashCode, casUnique);
+ }
+
+ public boolean cas(String key, Object value, Date expiry, long casUnique) {
+ return set("cas", key, value, expiry, null, casUnique);
+ }
+
+ public boolean cas(String key, Object value, Date expiry, Integer hashCode, long casUnique) {
+ return set("cas", key, value, expiry, hashCode, casUnique);
+ }
+
+ public boolean cas(String key, Object value, long casUnique) {
+ return set("cas", key, value, null, null, casUnique);
+ }
+
+ public boolean prepend(String key, Object value, Integer hashCode) {
+ return set("prepend", key, value, null, hashCode, 0L);
+ }
+
+ public boolean prepend(String key, Object value) {
+ return set("prepend", key, value, null, null, 0L);
+ }
+
+ public boolean replace(String key, Object value) {
+ return set("replace", key, value, null, null, 0L);
+ }
+
+ public boolean replace(String key, Object value, Integer hashCode) {
+ return set("replace", key, value, null, hashCode, 0L);
+ }
+
+ public boolean replace(String key, Object value, Date expiry) {
+ return set("replace", key, value, expiry, null, 0L);
+ }
+
+ public boolean replace(String key, Object value, Date expiry, Integer hashCode) {
+ return set("replace", key, value, expiry, hashCode, 0L);
+ }
+
+ /**
+ * Stores data to cache.
+ *
+ * If data does not already exist for this key on the server, or if the key
+ * is being
+ * deleted, the specified value will not be stored.
+ * The server will automatically delete the value when the expiration time
+ * has been reached.
+ *
+ * If compression is enabled, and the data is longer than the compression
+ * threshold
+ * the data will be stored in compressed form.
+ *
+ * As of the current release, all objects stored will use java
+ * serialization.
+ *
+ * @param cmdname
+ * action to take (set, add, replace, append, prepend or cas)
+ * @param key
+ * key to store cache under
+ * @param value
+ * object to cache
+ * @param expiry
+ * expiration
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @return true/false indicating success
+ */
+ private boolean set(String cmdname, String key, Object value, Date expiry, Integer hashCode, Long casUnique) {
+
+ if (cmdname == null || key == null) {
+ log.error("key is null or cmd is null/empty for set()");
+ return false;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return false;
+ }
+
+ if (value == null) {
+ log.error("trying to store a null value to cache");
+ return false;
+ }
+
+ // get SockIO obj
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, new IOException("no socket to server available"), key);
+ return false;
+ }
+
+ if (expiry == null)
+ expiry = new Date(0);
+
+ try {
+ StorageCommand setCmd = new StorageCommand(cmdname, key, value, expiry, hashCode, casUnique, transCoder);
+ short rid = setCmd.request(sock);
+ return setCmd.response(sock, rid);
+ } catch (IOException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on set");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ return false;
+ }
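+
+ /*
+ * Illustrative sketch of the public wrappers that funnel into this method,
+ * assuming mc is an initialized client:
+ *
+ * mc.set("cacheKey1", value); // unconditional store
+ * mc.add("cacheKey1", value); // stores only if the key is absent
+ * mc.replace("cacheKey1", value); // stores only if the key already exists
+ */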
+
+ /**
+ * set transcoder. TransCoder is used to customize the serialization and
+ * deserialization.
+ *
+ * @param transCoder
+ */
+ public void setTransCoder(TransCoder transCoder) {
+ this.transCoder = transCoder;
+ }
+
+ public long addOrDecr(String key) {
+ return addOrDecr(key, 0, null);
+ }
+
+ public long addOrDecr(String key, long inc) {
+ return addOrDecr(key, inc, null);
+ }
+
+ public long addOrDecr(String key, long inc, Integer hashCode) {
+ boolean ret = add(key, "" + inc, hashCode);
+ if (ret) {
+ return inc;
+ } else {
+ return incrdecr("decr", key, inc, hashCode);
+ }
+ }
+
+ public long addOrIncr(String key) {
+ return addOrIncr(key, 0, null);
+ }
+
+ public long addOrIncr(String key, long inc) {
+ return addOrIncr(key, inc, null);
+ }
+
+ public long addOrIncr(String key, long inc, Integer hashCode) {
+ boolean ret = add(key, "" + inc, hashCode);
+
+ if (ret) {
+ return inc;
+ } else {
+ return incrdecr("incr", key, inc, hashCode);
+ }
+ }
+
+ public long decr(String key) {
+ return incrdecr("decr", key, 1, null);
+ }
+
+ public long decr(String key, long inc) {
+ return incrdecr("decr", key, inc, null);
+ }
+
+ public long decr(String key, long inc, Integer hashCode) {
+ return incrdecr("decr", key, inc, hashCode);
+ }
+
+ public boolean delete(String key) {
+ return delete(key, null, null);
+ }
+
+ public boolean delete(String key, Date expiry) {
+ return delete(key, null, expiry);
+ }
+
+ public boolean flushAll() {
+ return flushAll(null);
+ }
+
+ public boolean flushAll(String[] servers) {
+ // get SockIOPool instance
+ // return false if unable to get SockIO obj
+ if (pool == null) {
+ log.error("++++ unable to get SockIOPool instance");
+ return false;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to flush");
+ return false;
+ }
+
+ boolean success = true;
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnFlush(this, new IOException("no socket to server available"));
+
+ log.error("++++ unable to get connection to : " + servers[i]);
+ success = false;
+ continue;
+ }
+
+ try {
+ FlushAllCommand flushallCmd = new FlushAllCommand();
+ short rid = flushallCmd.request(sock);
+ success = flushallCmd.response(sock, rid);
+
+ if (!success) {
+ return success;
+ }
+ } catch (IOException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnFlush(this, e);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on flushAll");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ success = false;
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return success;
+ }
+
+ public Object get(String key) {
+ return get(key, null);
+ }
+
+ public Object get(String key, Integer hashCode) {
+ return get("get", key, hashCode).value;
+ }
+
+ public Map getMulti(String[] keys) {
+ return getMulti(keys, null);
+ }
+
+ public Map getMulti(String[] keys, Integer[] hashCodes) {
+
+ if (keys == null || keys.length == 0) {
+ log.error("missing keys for getMulti()");
+ return null;
+ }
+
+ Map ret = new HashMap(keys.length);
+
+ for (int i = 0; i < keys.length; ++i) {
+
+ String key = keys[i];
+ if (key == null) {
+ log.error("null key, so skipping");
+ continue;
+ }
+
+ Integer hash = null;
+ if (hashCodes != null && hashCodes.length > i)
+ hash = hashCodes[i];
+
+ // get SockIO obj from cache key
+ SchoonerSockIO sock = pool.getSock(key, hash);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, new IOException("no socket to server available"), key);
+ continue;
+ }
+
+ ret.put(key, get("get", key, hash).value);
+ sock.close();
+ }
+ return ret;
+ }
+
+ public Object[] getMultiArray(String[] keys) {
+ return getMultiArray(keys, null);
+ }
+
+ public Object[] getMultiArray(String[] keys, Integer[] hashCodes) {
+
+ Map data = getMulti(keys, hashCodes);
+
+ if (data == null)
+ return null;
+
+ Object[] res = new Object[keys.length];
+ for (int i = 0; i < keys.length; i++) {
+ res[i] = data.get(keys[i]);
+ }
+
+ return res;
+ }
+
+ /**
+ * Retrieve multiple objects from the memcache.
+ *
+ * This is recommended over repeated calls to {@link #get(String) get()},
+ * since it
+ * is more efficient.
+ *
+ * @param keys
+ * String array of keys to retrieve
+ * @param hashCodes
+ * if not null, then the Integer array of hashCodes
+ * @param asString
+ * if true, retrieve string vals
+ * @return Object array ordered in same order as key array containing
+ * results
+ */
+ public Object[] getMultiArray(String[] keys, Integer[] hashCodes, boolean asString) {
+
+ Map data = getMulti(keys, hashCodes, asString);
+
+ if (data == null)
+ return null;
+
+ Object[] res = new Object[keys.length];
+ for (int i = 0; i < keys.length; i++) {
+ res[i] = data.get(keys[i]);
+ }
+
+ return res;
+ }
+
+ public MemcachedItem gets(String key) {
+ return gets(key, null);
+ }
+
+ public MemcachedItem gets(String key, Integer hashCode) {
+ return get("gets", key, hashCode);
+ }
+
+ /**
+ * get memcached item from server.
+ *
+ * @param cmd
+ * cmd to be used, get/gets
+ * @param key
+ * specified key
+ * @param hashCode
+ * specified hashcode
+ * @return memcached item with value in it.
+ */
+ private MemcachedItem get(String cmd, String key, Integer hashCode) {
+ MemcachedItem item = new MemcachedItem();
+
+ if (key == null) {
+ log.error("key is null for get()");
+ return item;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return item;
+ }
+
+ // get SockIO obj using cache key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, new IOException("no socket to server available"), key);
+ return item;
+ }
+
+ RetrievalCommand retrieval = new RetrievalCommand(cmd, key);
+ try {
+ short rid = retrieval.request(sock);
+ return retrieval.response(sock, transCoder, rid);
+ } catch (IOException e) {
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ return item;
+
+ }
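+
+ /*
+ * Illustrative check-and-set round trip, assuming mc is an initialized
+ * client and newValue stands for whatever replacement object the caller
+ * has: gets() returns the stored value together with its cas unique, which
+ * cas() passes back so the store succeeds only if the entry has not changed
+ * in between.
+ *
+ * MemcachedItem item = mc.gets("cacheKey1");
+ * if (item != null && item.value != null) {
+ * mc.cas("cacheKey1", newValue, item.casUnique);
+ * }
+ */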
+
+ public long incr(String key) {
+ return incrdecr("incr", key, 1, null);
+ }
+
+ public long incr(String key, long inc) {
+ return incrdecr("incr", key, inc, null);
+ }
+
+ public long incr(String key, long inc, Integer hashCode) {
+ return incrdecr("incr", key, inc, hashCode);
+ }
+
+ public boolean keyExists(String key) {
+ return (this.get(key, null) != null);
+ }
+
+ public Map<String, Map<String, String>> stats() {
+ return stats(null);
+ }
+
+ public Map<String, Map<String, String>> stats(String[] servers) {
+ return stats(servers, "stats\r\n", STATS);
+ }
+
+ public Map<String, Map<String, String>> statsCacheDump(int slabNumber, int limit) {
+ return statsCacheDump(null, slabNumber, limit);
+ }
+
+ public Map<String, Map<String, String>> statsCacheDump(String[] servers, int slabNumber, int limit) {
+ return stats(servers, String.format("stats cachedump %d %d\r\n", slabNumber, limit), ITEM);
+ }
+
+ public Map<String, Map<String, String>> statsItems() {
+ return statsItems(null);
+ }
+
+ public Map<String, Map<String, String>> statsItems(String[] servers) {
+ return stats(servers, "stats items\r\n", STATS);
+ }
+
+ public Map<String, Map<String, String>> statsSlabs() {
+ return statsSlabs(null);
+ }
+
+ public Map<String, Map<String, String>> statsSlabs(String[] servers) {
+ return stats(servers, "stats slabs\r\n", STATS);
+ }
+
+ public boolean sync(String key, Integer hashCode) {
+ if (key == null) {
+ if (log.isErrorEnabled())
+ log.error("null value for key passed to delete()");
+ return false;
+ }
+
+ // get SockIO obj from hash or from key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ // return false if unable to get SockIO obj
+ if (sock == null) {
+ return false;
+ }
+
+ try {
+ SyncCommand syncCmd = new SyncCommand(key, hashCode);
+ short rid = syncCmd.request(sock);
+ return syncCmd.response(sock, rid);
+ } catch (IOException e) {
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on delete");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
+ public boolean sync(String key) {
+ return sync(key, null);
+ }
+
+ public boolean syncAll() {
+ return syncAll(null);
+ }
+
+ public boolean syncAll(String[] servers) {
+ // get SockIOPool instance
+ // return false if unable to get SockIO obj
+ if (pool == null) {
+ log.error("++++ unable to get SockIOPool instance");
+ return false;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to sync");
+ return false;
+ }
+
+ boolean success = true;
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ log.error("++++ unable to get connection to : " + servers[i]);
+ success = false;
+ continue;
+ }
+
+ try {
+ SyncAllCommand syncCmd = new SyncAllCommand();
+ short rid = syncCmd.request(sock);
+ success = syncCmd.response(sock, rid);
+
+ if (!success)
+ return false;
+ } catch (IOException e) {
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exceptionthrown while writing bytes to server on flushAll");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ success = false;
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return success;
+ }
+
+ public boolean delete(String key, Integer hashCode, Date expiry) {
+
+ if (key == null) {
+ log.error("null value for key passed to delete()");
+ return false;
+ }
+
+ // get SockIO obj from hash or from key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ // return false if unable to get SockIO obj
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, new IOException("no socket to server available"), key);
+ return false;
+ }
+
+ try {
+ DeletionCommand deletion = new DeletionCommand(key, hashCode, expiry);
+ short rid = deletion.request(sock);
+ return deletion.response(sock, rid);
+ } catch (IOException e) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on delete");
+ log.error(e.getMessage(), e);
+ }
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Increments/decrements the value at the specified key by inc.
+ *
+ * Note that the server uses a 32-bit unsigned integer, and checks for
+ * underflow. In the event of underflow, the result will be zero. Because
+ * Java lacks unsigned types, the value is returned as a 64-bit integer.
+ * The server will only decrement a value if it already exists;
+ * if a value is not found, -1 will be returned.
+ *
+ * @param cmdname
+ * increment/decrement
+ * @param key
+ * cache key
+ * @param inc
+ * amount to incr or decr
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @return new value or -1 if not exist
+ */
+ private long incrdecr(String cmdname, String key, long inc, Integer hashCode) {
+
+ if (key == null) {
+ log.error("null key for incrdecr()");
+ return -1;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return -1;
+ }
+
+ // get SockIO obj for given cache key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, new IOException("no socket to server available"), key);
+ return -1;
+ }
+
+ try {
+ IncrdecrCommand idCmd = new IncrdecrCommand(cmdname, key, inc, hashCode);
+ short rid = idCmd.request(sock);
+ if (idCmd.response(sock, rid)) {
+ return idCmd.getResult();
+ } else {
+ return -1;
+ }
+ } catch (IOException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on incr/decr");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return -1;
+ }
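+
+ /*
+ * Illustrative counter usage, assuming mc is an initialized client:
+ * addOrIncr() seeds the counter if it is missing, after which incr()/decr()
+ * adjust it and return the new value (or -1 when the key does not exist).
+ *
+ * mc.addOrIncr("counterKey", 100);
+ * long n = mc.incr("counterKey", 5);
+ * n = mc.decr("counterKey");
+ */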
+
+ private Map<String, Map<String, String>> stats(String[] servers, String command, String lineStart) {
+
+ if (command == null || command.trim().equals("")) {
+ log.error("++++ invalid / missing command for stats()");
+ return null;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to check stats");
+ return null;
+ }
+
+ // array of stats Maps
+ Map<String, Map<String, String>> statsMaps = new HashMap<String, Map<String, String>>();
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnStats(this, new IOException("no socket to server available"));
+ continue;
+ }
+ // build command
+ try {
+ StatsCommand statsCmd = new StatsCommand(command, lineStart);
+ short rid = statsCmd.request(sock);
+ Map stats = statsCmd.response(sock, rid);
+ statsMaps.put(servers[i], stats);
+ } catch (IOException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnStats(this, e);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on stats");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return statsMaps;
+ }
+
+ @Override
+ public void setDefaultEncoding(String defaultEncoding) {
+ this.defaultEncoding = defaultEncoding;
+ }
+
+ @Override
+ public void setPrimitiveAsString(boolean primitiveAsString) {
+ this.primitiveAsString = primitiveAsString;
+ }
+
+ @Override
+ public void setSanitizeKeys(boolean sanitizeKeys) {
+ this.sanitizeKeys = sanitizeKeys;
+ }
+
+ private String sanitizeKey(String key) throws UnsupportedEncodingException {
+ return (sanitizeKeys) ? URLEncoder.encode(key, "UTF-8") : key;
+ }
+
+ @Override
+ public Object get(String key, Integer hashCode, boolean asString) {
+ return get("get", key, hashCode).value;
+ }
+
+ @Override
+ public Map getMulti(String[] keys, Integer[] hashCodes, boolean asString) {
+ return getMulti(keys, hashCodes);
+ }
+
+}
diff --git a/src/main/java/com/schooner/MemCached/AuthInfo.java b/src/main/java/com/schooner/MemCached/AuthInfo.java
new file mode 100644
index 0000000..5df5b74
--- /dev/null
+++ b/src/main/java/com/schooner/MemCached/AuthInfo.java
@@ -0,0 +1,41 @@
+package com.schooner.MemCached;
+
+import javax.security.auth.callback.CallbackHandler;
+
+/**
+ * @author Meng Li
+ * @since 2.6.1
+ * @see AuthInfo
+ */
+public class AuthInfo {
+
+ private final CallbackHandler callbackHandler;
+ private final String[] mechanisms;
+
+ public AuthInfo(CallbackHandler callbackHandler, String[] mechanisms) {
+ super();
+ this.callbackHandler = callbackHandler;
+ this.mechanisms = mechanisms;
+ }
+
+ public static AuthInfo plain(String username, String password) {
+ return new AuthInfo(new PlainCallbackHandler(username, password), new String[] { "PLAIN" });
+ }
+
+ public static AuthInfo cramMD5(String username, String password) {
+ return new AuthInfo(new PlainCallbackHandler(username, password), new String[] { "CRAM-MD5" });
+ }
+
+ public static AuthInfo typical(String username, String password) {
+ return new AuthInfo(new PlainCallbackHandler(username, password), new String[] { "CRAM-MD5", "PLAIN" });
+ }
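+
+ /*
+ * Illustrative sketch: building credentials for an authenticating pool.
+ * plain() forces the PLAIN mechanism, cramMD5() forces CRAM-MD5, and
+ * typical() offers both and lets the SASL negotiation pick one.
+ *
+ * AuthInfo auth = AuthInfo.typical("username", "password");
+ */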
+
+ public CallbackHandler getCallbackHandler() {
+ return callbackHandler;
+ }
+
+ public String[] getMechanisms() {
+ return mechanisms;
+ }
+
+}
diff --git a/src/main/java/com/schooner/MemCached/AuthSchoonerSockIOFactory.java b/src/main/java/com/schooner/MemCached/AuthSchoonerSockIOFactory.java
new file mode 100644
index 0000000..4aedd35
--- /dev/null
+++ b/src/main/java/com/schooner/MemCached/AuthSchoonerSockIOFactory.java
@@ -0,0 +1,105 @@
+package com.schooner.MemCached;
+
+import java.io.DataInputStream;
+
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.whalin.MemCached.MemCachedClient;
+
+/**
+ * {@link AuthSchoonerSockIOFactory} is used to create and destroy sockets for
+ * a connection pool that requires authentication.
+ *
+ * @author Meng Li
+ * @since 2.6.1
+ * @see AuthSchoonerSockIOFactory
+ */
+public class AuthSchoonerSockIOFactory extends SchoonerSockIOFactory {
+
+ // logger
+ public static Logger log = LoggerFactory.getLogger(AuthSchoonerSockIOFactory.class);
+
+ public final static String NTLM = "NTLM";
+ public final static String PLAIN = "PLAIN";
+ public final static String LOGIN = "LOGIN";
+ public final static String DIGEST_MD5 = "DIGEST-MD5";
+ public final static String CRAM_MD5 = "CRAM-MD5";
+ public final static String ANONYMOUS = "ANONYMOUS";
+
+ public static final byte[] EMPTY_BYTES = new byte[0];
+
+ private AuthInfo authInfo;
+
+ public AuthSchoonerSockIOFactory(String host, boolean isTcp, int bufferSize, int socketTO, int socketConnectTO,
+ boolean nagle, AuthInfo authInfo) {
+ super(host, isTcp, bufferSize, socketTO, socketConnectTO, nagle);
+ this.authInfo = authInfo;
+ }
+
+ @Override
+ public Object makeObject() throws Exception {
+ SchoonerSockIO socket = createSocket(host);
+ auth(socket);
+ return socket;
+ }
+
+ private void auth(SchoonerSockIO socket) throws Exception {
+ SaslClient saslClient = Sasl.createSaslClient(authInfo.getMechanisms(), null, "memcached", host, null,
+ this.authInfo.getCallbackHandler());
+
+ byte[] authData = saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(EMPTY_BYTES) : EMPTY_BYTES;
+
+ authData = sendAuthData(socket, MemCachedClient.OPCODE_START_AUTH, saslClient.getMechanismName(), authData);
+ if (authData == null)
+ return;
+ authData = saslClient.evaluateChallenge(authData);
+ if (sendAuthData(socket, MemCachedClient.OPCODE_AUTH_STEPS, saslClient.getMechanismName(), authData) == null)
+ return;
+
+ if (log.isErrorEnabled())
+ log.error("Auth Failed: mechanism = " + saslClient.getMechanismName());
+ throw new Exception();
+ }
+
+ private byte[] sendAuthData(SchoonerSockIO sock, byte opcode, String mechanism, byte[] authData) throws Exception {
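+ // Assemble a 24-byte memcached binary-protocol request header: magic (1 byte),
+ // opcode (1), key length (2), extras length / data type / vbucket id (4, all
+ // zero here), total body length (4), opaque (4, zero) and cas (8, zero),
+ // followed by the mechanism name as the key and the SASL payload as the value.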
+ sock.writeBuf.clear();
+ sock.writeBuf.put(MemCachedClient.MAGIC_REQ);
+ sock.writeBuf.put(opcode);
+ sock.writeBuf.putShort((short) mechanism.length());
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putInt(mechanism.length() + authData.length);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(0);
+ sock.writeBuf.put(mechanism.getBytes());
+ sock.writeBuf.put(authData);
+
+ // write the buffer to server
+ // now write the data to the cache server
+ sock.flush();
+ // get result code
+ DataInputStream dis = new DataInputStream(new SockInputStream(sock, Integer.MAX_VALUE));
+ dis.readInt();
+ dis.readByte();
+ dis.readByte();
+ byte[] response = null;
+ short status = dis.readShort();
+ if (status == MemCachedClient.FURTHER_AUTH) {
+ int length = dis.readInt();
+ response = new byte[length];
+ dis.readInt();
+ dis.readLong();
+ dis.read(response);
+ } else if (status == MemCachedClient.AUTH_FAILED) {
+ if (log.isErrorEnabled())
+ log.error("Auth Failed: mechanism = " + mechanism);
+ dis.close();
+ throw new Exception();
+ }
+ dis.close();
+ return response;
+ }
+}
diff --git a/src/main/java/com/schooner/MemCached/BinaryClient.java b/src/main/java/com/schooner/MemCached/BinaryClient.java
new file mode 100755
index 0000000..e5908e2
--- /dev/null
+++ b/src/main/java/com/schooner/MemCached/BinaryClient.java
@@ -0,0 +1,1714 @@
+/*******************************************************************************
+ * Copyright (c) 2009 Schooner Information Technology, Inc.
+ * All rights reserved.
+ *
+ * http://www.schoonerinfotech.com/
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ******************************************************************************/
+package com.schooner.MemCached;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.nio.ByteBuffer;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+import java.nio.channels.SocketChannel;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.zip.GZIPInputStream;
+
+import com.whalin.MemCached.ErrorHandler;
+import com.whalin.MemCached.MemCachedClient;
+
+/**
+ * This client implements the memcached binary protocol and is tuned for
+ * high performance.
+ *
+ * Please use the wrapper class {@link MemCachedClient} for accessing the
+ * memcached server.
+ *
+ * @author Xingen Wang
+ * @since 2.5.0
+ * @see AscIIClient
+ */
+public class BinaryClient extends MemCachedClient {
+
+ private TransCoder transCoder = new ObjectTransCoder();
+
+ // pool instance
+ private SchoonerSockIOPool pool;
+
+ // which pool to use
+ private String poolName;
+
+ // flags
+ private boolean sanitizeKeys;
+ private boolean primitiveAsString;
+ @SuppressWarnings("unused")
+ private boolean compressEnable;
+ @SuppressWarnings("unused")
+ private long compressThreshold;
+ private String defaultEncoding = "utf-8";
+
+ public boolean isUseBinaryProtocol() {
+ return true;
+ }
+
+ /**
+ * Creates a new instance of BinaryClient.
+ */
+ public BinaryClient() {
+ this(null);
+ }
+
+ /**
+ * Creates a new instance of BinaryClient accepting a passed in pool
+ * name.
+ *
+ * @param poolName
+ * name of SockIOPool
+ */
+ public BinaryClient(String poolName) {
+ this(poolName, null, null);
+ }
+
+ public BinaryClient(String poolName, ClassLoader cl, ErrorHandler eh) {
+ super((MemCachedClient) null);
+ this.poolName = poolName;
+ this.classLoader = cl;
+ this.errorHandler = eh;
+ init();
+ }
+
+ /**
+ * Initializes the client to defaults and binds it to its socket pool.
+ *
+ * Uses the pool name passed to the constructor, or "default" if none was given.
+ */
+ private void init() {
+ this.poolName = (this.poolName == null) ? "default" : this.poolName;
+
+ // get a pool instance to work with for the life of this instance
+ this.pool = SchoonerSockIOPool.getInstance(poolName);
+ }
+
+ public boolean keyExists(String key) {
+ return (this.get(key, null) != null);
+ }
+
+ public boolean delete(String key) {
+ return delete(key, null, null);
+ }
+
+ public boolean delete(String key, Date expiry) {
+ return delete(key, null, expiry);
+ }
+
+ public boolean delete(String key, Integer hashCode, Date expiry) {
+
+ if (key == null) {
+ log.error("null value for key passed to delete()");
+ return false;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return false;
+ }
+
+ // get SockIO obj from hash or from key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ // return false if unable to get SockIO obj
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, new IOException("no socket to server available"), key);
+ return false;
+ }
+
+ try {
+ sock.writeBuf.clear();
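+ // delete request: standard 24-byte header followed by the key only
+ // (no extras, no value), so the total body length equals the key length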
+ sock.writeBuf.put(MAGIC_REQ);
+ sock.writeBuf.put(OPCODE_DELETE);
+ byte[] keyBuf = key.getBytes();
+ sock.writeBuf.putShort((short) keyBuf.length);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putInt(keyBuf.length);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(0L);
+ sock.writeBuf.put(keyBuf);
+ sock.flush();
+ // if we get appropriate response back, then we return true
+ // get result code
+ SockInputStream input = new SockInputStream(sock, Integer.MAX_VALUE);
+ DataInputStream dis = new DataInputStream(input);
+ dis.readInt();
+ dis.readShort();
+ short status = dis.readShort();
+ dis.close();
+ if (status == STAT_NO_ERROR) {
+ log.debug("++++ deletion of key: " + key + " from cache was a success");
+
+ // return sock to pool and bail here
+ return true;
+ } else if (status == STAT_KEY_NOT_FOUND) {
+ log.debug("++++ deletion of key: " + key + " from cache failed as the key was not found");
+ } else {
+ if (log.isErrorEnabled()) {
+ log.error("++++ error deleting key: " + key);
+ log.error("++++ server response: " + status);
+ }
+ }
+ } catch (IOException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnDelete(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on delete");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } catch (RuntimeException e) {
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
+ public boolean set(String key, Object value) {
+ return set(OPCODE_SET, key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Integer hashCode) {
+ return set(OPCODE_SET, key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Date expiry) {
+ return set(OPCODE_SET, key, value, expiry, null, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Date expiry, Integer hashCode) {
+ return set(OPCODE_SET, key, value, expiry, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean set(String key, Object value, Date expiry, Integer hashCode, boolean asString) {
+ return set(OPCODE_SET, key, value, expiry, hashCode, 0L, asString);
+ }
+
+ public boolean add(String key, Object value) {
+ return set(OPCODE_ADD, key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean add(String key, Object value, Integer hashCode) {
+ return set(OPCODE_ADD, key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean add(String key, Object value, Date expiry) {
+ return set(OPCODE_ADD, key, value, expiry, null, 0L, primitiveAsString);
+ }
+
+ public boolean add(String key, Object value, Date expiry, Integer hashCode) {
+ return set(OPCODE_ADD, key, value, expiry, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean append(String key, Object value, Integer hashCode) {
+ return apPrepend(OPCODE_APPEND, key, value, hashCode, 0L);
+ }
+
+ public boolean append(String key, Object value) {
+ return apPrepend(OPCODE_APPEND, key, value, null, 0L);
+ }
+
+ public boolean cas(String key, Object value, Integer hashCode, long casUnique) {
+ return set(OPCODE_SET, key, value, null, hashCode, casUnique, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, Date expiry, long casUnique) {
+ return set(OPCODE_SET, key, value, expiry, null, casUnique, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, Date expiry, Integer hashCode, long casUnique) {
+ return set(OPCODE_SET, key, value, expiry, hashCode, casUnique, primitiveAsString);
+ }
+
+ public boolean cas(String key, Object value, long casUnique) {
+ return set(OPCODE_SET, key, value, null, null, casUnique, primitiveAsString);
+ }
+
+ public boolean prepend(String key, Object value, Integer hashCode) {
+ return apPrepend(OPCODE_PREPEND, key, value, hashCode, 0L);
+ }
+
+ public boolean prepend(String key, Object value) {
+ return apPrepend(OPCODE_PREPEND, key, value, null, 0L);
+ }
+
+ public boolean replace(String key, Object value) {
+ return set(OPCODE_REPLACE, key, value, null, null, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value, Integer hashCode) {
+ return set(OPCODE_REPLACE, key, value, null, hashCode, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value, Date expiry) {
+ return set(OPCODE_REPLACE, key, value, expiry, null, 0L, primitiveAsString);
+ }
+
+ public boolean replace(String key, Object value, Date expiry, Integer hashCode) {
+ return set(OPCODE_REPLACE, key, value, expiry, hashCode, 0L, primitiveAsString);
+ }
+
+ /**
+ * Set, Add, Replace data to cache.
+ *
+ * With set the value is always stored; add stores it only if the key does
+ * not already exist, and replace only if it does. The server automatically
+ * deletes the value once the expiration time has been reached.
+ *
+ * Primitive types are encoded natively (or as strings when asString is set);
+ * all other objects are serialized with the configured TransCoder.
+ *
+ * @param opcode
+ * binary protocol opcode to use (set, add or replace)
+ * @param key
+ * key to store cache under
+ * @param value
+ * object to cache
+ * @param expiry
+ * expiration
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @return true/false indicating success
+ */
+ private boolean set(byte opcode, String key, Object value, Date expiry, Integer hashCode, long casUnique,
+ boolean asString) {
+
+ if (key == null) {
+ log.error("key is null or cmd is null/empty for set()");
+ return false;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return false;
+ }
+
+ if (value == null) {
+ log.error("trying to store a null value to cache");
+ return false;
+ }
+
+ // get SockIO obj
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, new IOException("no socket to server available"), key);
+ return false;
+ }
+
+ if (expiry == null)
+ expiry = new Date(0);
+
+ try {
+ // store flags
+ int flags = asString ? MemCachedClient.MARKER_STRING : NativeHandler.getMarkerFlag(value);
+ byte[] buf = key.getBytes();
+ sock.writeBuf.clear();
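+ // set/add/replace request header; the 8 bytes of extras hold the item
+ // flags and the expiration time, and the total body length (extras +
+ // key + value) is patched in at offset 8 once the value has been written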
+ sock.writeBuf.put(MAGIC_REQ);
+ sock.writeBuf.put(opcode);
+ sock.writeBuf.putShort((short) buf.length);
+ sock.writeBuf.put((byte) 0x08);
+ sock.writeBuf.put((byte) 0);
+ sock.writeBuf.putShort((short) 0);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(casUnique);
+ sock.writeBuf.putInt(flags);
+ sock.writeBuf.putInt((int) (expiry.getTime() / 1000));
+ sock.writeBuf.put(buf);
+ SockOutputStream output = new SockOutputStream(sock);
+ int valLen = 0;
+ if (flags != MARKER_OTHERS) {
+ byte[] b;
+ if (asString) {
+ b = value.toString().getBytes(defaultEncoding);
+ } else {
+ /*
+ * Using NativeHandler to serialize the value
+ */
+ b = NativeHandler.encode(value);
+ }
+ output.write(b);
+ valLen = b.length;
+
+ } else {
+ // always serialize for non-primitive types
+ valLen = transCoder.encode(output, value);
+ }
+ // write serialized object
+ int bodyLen = 0x08 + buf.length + valLen;
+ int oldPosition = sock.writeBuf.position();
+ sock.writeBuf.position(8);
+ // put real object bytes size
+ sock.writeBuf.putInt(bodyLen);
+ // return to correct position.
+ sock.writeBuf.position(oldPosition);
+
+ // write the request buffer to the server
+ sock.flush();
+ // get result code
+ DataInputStream dis = new DataInputStream(new SockInputStream(sock, Integer.MAX_VALUE));
+ dis.readInt();
+ dis.readShort();
+ short stat = dis.readShort();
+ dis.close();
+ if (STAT_NO_ERROR == stat) {
+ return true;
+ }
+ } catch (IOException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on set");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } catch (RuntimeException e) {
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
+ /**
+ * Append & Prepend data to cache.
+ *
+ * If data does not already exist for this key on the server, or if the key
+ * is being
+ * deleted, the specified value will not be stored.
+ * The server will automatically delete the value when the expiration time
+ * has been reached.
+ *
+ * If compression is enabled, and the data is longer than the compression
+ * threshold
+ * the data will be stored in compressed form.
+ *
+ * As of the current release, all objects stored will use java
+ * serialization.
+ *
+ * @param cmdname
+ * action to take (set, add, replace)
+ * @param key
+ * key to store cache under
+ * @param value
+ * object to cache
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @return true/false indicating success
+ */
+ private boolean apPrepend(byte opcode, String key, Object value, Integer hashCode, Long casUnique) {
+
+ if (key == null) {
+ log.error("key is null or cmd is null/empty for set()");
+ return false;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ log.error("failed to sanitize your key!", e);
+ return false;
+ }
+
+ if (value == null) {
+ log.error("trying to store a null value to cache");
+ return false;
+ }
+
+ // get SockIO obj
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ return false;
+ }
+
+ try {
+ // store flags
+ int flags = NativeHandler.getMarkerFlag(value);
+ byte[] buf = key.getBytes();
+ sock.writeBuf.clear();
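+ // append/prepend request: 24-byte header with no extras, so the body is
+ // just the key followed by the value; the body length is patched in at
+ // offset 8 after the value has been written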
+ sock.writeBuf.put(MAGIC_REQ);
+ sock.writeBuf.put(opcode);
+ sock.writeBuf.putShort((short) buf.length);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(0L);
+ sock.writeBuf.putLong(casUnique);
+ sock.writeBuf.put(buf);
+ SockOutputStream output = new SockOutputStream(sock);
+ int valLen = 0;
+ if (flags != MARKER_OTHERS) {
+ byte[] b = NativeHandler.encode(value);
+ output.write(b);
+ valLen = b.length;
+ } else {
+ // always serialize for non-primitive types
+ valLen = transCoder.encode(output, value);
+ }
+ // write serialized object
+ int bodyLen = buf.length + valLen;
+ int oldPosition = sock.writeBuf.position();
+ sock.writeBuf.position(8);
+ // put real object bytes size
+ sock.writeBuf.putInt(bodyLen);
+ // return to correct position.
+ sock.writeBuf.position(oldPosition);
+
+ // write the request buffer to the server
+ sock.flush();
+ // get result code
+ DataInputStream dis = new DataInputStream(new SockInputStream(sock, Integer.MAX_VALUE));
+ dis.readInt();
+ dis.readShort();
+ short stat = dis.readShort();
+ dis.close();
+ if (STAT_NO_ERROR == stat) {
+ return true;
+ }
+ } catch (IOException e) {
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on set");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return false;
+ }
+
+ public long addOrIncr(String key) {
+ return addOrIncr(key, 0, null);
+ }
+
+ public long addOrIncr(String key, long inc) {
+ return addOrIncr(key, inc, null);
+ }
+
+ public long addOrIncr(String key, long inc, Integer hashCode) {
+ boolean ret = add(key, "" + inc, hashCode);
+
+ if (ret) {
+ return inc;
+ } else {
+ return incrdecr(OPCODE_INCREMENT, key, inc, hashCode);
+ }
+ }
+
+ public long addOrDecr(String key) {
+ return addOrDecr(key, 0, null);
+ }
+
+ public long addOrDecr(String key, long inc) {
+ return addOrDecr(key, inc, null);
+ }
+
+ public long addOrDecr(String key, long inc, Integer hashCode) {
+ boolean ret = add(key, "" + inc, hashCode);
+ if (ret) {
+ return inc;
+ } else {
+ return incrdecr(OPCODE_DECREMENT, key, inc, hashCode);
+ }
+ }
+
+ public long incr(String key) {
+ return incrdecr(OPCODE_INCREMENT, key, 1, null);
+ }
+
+ public long incr(String key, long inc) {
+ return incrdecr(OPCODE_INCREMENT, key, inc, null);
+ }
+
+ public long incr(String key, long inc, Integer hashCode) {
+ return incrdecr(OPCODE_INCREMENT, key, inc, hashCode);
+ }
+
+ public long decr(String key) {
+ return incrdecr(OPCODE_DECREMENT, key, 1, null);
+ }
+
+ public long decr(String key, long inc) {
+ return incrdecr(OPCODE_DECREMENT, key, inc, null);
+ }
+
+ public long decr(String key, long inc, Integer hashCode) {
+ return incrdecr(OPCODE_DECREMENT, key, inc, hashCode);
+ }
+
+ /**
+ * Increments/decrements the value at the specified key by inc.
+ *
+ * Note that the server stores counters as 64-bit unsigned integers and
+ * checks for underflow; in the event of underflow the result is zero.
+ * Because Java lacks unsigned types, the value is returned as a signed
+ * 64-bit integer. The server only increments or decrements a value that
+ * already exists; if the key is not found, -1 is returned.
+ *
+ * @param opcode
+ * increment/decrement
+ * @param key
+ * cache key
+ * @param inc
+ * amount to incr or decr
+ * @param hashCode
+ * if not null, then the int hashcode to use
+ * @return new value or -1 if not exist
+ */
+ private long incrdecr(byte opcode, String key, long inc, Integer hashCode) {
+
+ if (key == null) {
+ log.error("null key for incrdecr()");
+ return -1;
+ }
+
+ try {
+ key = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ return -1;
+ }
+
+ // get SockIO obj for given cache key
+ SchoonerSockIO sock = pool.getSock(key, hashCode);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnSet(this, new IOException("no socket to server available"), key);
+ return -1;
+ }
+
+ try {
+ sock.writeBuf.clear();
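+ // incr/decr request: 20 bytes of extras hold the delta (8 bytes), the
+ // initial value (8 bytes, unused here) and the expiration (4 bytes)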
+ sock.writeBuf.put(MAGIC_REQ);
+ sock.writeBuf.put(opcode);
+ byte[] keyBuf = key.getBytes();
+ sock.writeBuf.putShort((short) keyBuf.length);// key size
+ sock.writeBuf.put((byte) 0X14);
+ sock.writeBuf.put((byte) 0);
+ sock.writeBuf.putShort((short) 0);
+ sock.writeBuf.putInt(keyBuf.length + 20); // body total
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(0L);
+ sock.writeBuf.putLong(inc);
+ sock.writeBuf.putLong(0L);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.put(keyBuf);
+ sock.flush();
+ // get result code
+ DataInputStream dis = new DataInputStream(new SockInputStream(sock, Integer.MAX_VALUE));
+ dis.readInt();
+ dis.readShort();
+ short status = dis.readShort();
+ if (status == STAT_NO_ERROR) {
+ // skip the remaining header fields (body length, opaque, CAS),
+ // then read the 8-byte counter value
+ dis.readLong();
+ dis.readLong();
+ long res = dis.readLong();
+ dis.close();
+ return res;
+ } else {
+ dis.close();
+ if (log.isErrorEnabled()) {
+ log.error(new StringBuffer().append("++++ error incr/decr key: ").append(key).toString());
+ log.error(new StringBuffer().append("++++ server response: ").append(status).toString());
+ }
+ }
+ } catch (IOException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on incr/decr");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+
+ return -1;
+ }
+
+ public Object get(String key) {
+ return get(key, null);
+ }
+
+ public Object get(String key, Integer hashCode) {
+ return get(OPCODE_GET, key, hashCode, false);
+ }
+
+ public MemcachedItem gets(String key) {
+ return gets(key, null);
+ }
+
+ public MemcachedItem gets(String key, Integer hashCode) {
+ return gets(OPCODE_GET, key, hashCode, false);
+ }
+
+ public void setTransCoder(TransCoder transCoder) {
+ this.transCoder = transCoder;
+ }
+
+ public Object[] getMultiArray(String[] keys) {
+ return getMultiArray(keys, null);
+ }
+
+ public Object[] getMultiArray(String[] keys, Integer[] hashCodes) {
+
+ Map<String, Object> data = getMulti(keys, hashCodes);
+
+ if (data == null)
+ return null;
+
+ Object[] res = new Object[keys.length];
+ for (int i = 0; i < keys.length; i++) {
+ res[i] = data.get(keys[i]);
+ }
+
+ return res;
+ }
+
+ public Map<String, Object> getMulti(String[] keys) {
+ return getMulti(keys, null);
+ }
+
+ public Map<String, Object> getMulti(String[] keys, Integer[] hashCodes) {
+ return getMulti(keys, hashCodes, false);
+ }
+
+ /**
+ * Retrieve multiple keys from the memcache.
+ *
+ * This is recommended over repeated calls to {@link #get(String) get()},
+ * since it is more efficient.
+ *
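+ * A brief illustrative call (the variable names and keys are examples only):
+ *
+ * Map<String, Object> values = mc.getMulti(new String[] { "key1", "key2" });
+ * Object val = values.get("key1"); // null if the server had no entry for key1
+ *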
+ * @param keys
+ * keys to retrieve
+ * @param hashCodes
+ * if not null, then the Integer array of hashCodes
+ * @param asString
+ * if true then retrieve using String val
+ * @return a map with an entry for each key found by the server; keys that
+ * are not found are not entered into the map, so retrieving them
+ * from the map yields null
+ */
+ public Map<String, Object> getMulti(String[] keys, Integer[] hashCodes, boolean asString) {
+
+ if (keys == null || keys.length == 0) {
+ log.error("missing keys for getMulti()");
+ return null;
+ }
+
+ Map<String, List<String>> cmdMap = new HashMap<String, List<String>>();
+ String[] cleanKeys = new String[keys.length];
+ for (int i = 0; i < keys.length; ++i) {
+
+ String key = keys[i];
+ if (key == null) {
+ log.error("null key, so skipping");
+ continue;
+ }
+
+ Integer hash = null;
+ if (hashCodes != null && hashCodes.length > i)
+ hash = hashCodes[i];
+
+ cleanKeys[i] = key;
+ try {
+ cleanKeys[i] = sanitizeKey(key);
+ } catch (UnsupportedEncodingException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ log.error("failed to sanitize your key!", e);
+ continue;
+ }
+
+ // get SockIO obj from cache key
+ SchoonerSockIO sock = pool.getSock(cleanKeys[i], hash);
+
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, new IOException("no socket to server available"), key);
+ continue;
+ }
+
+ // store in map and list if not already
+ if (!cmdMap.containsKey(sock.getHost()))
+ cmdMap.put(sock.getHost(), new ArrayList<String>());
+
+ cmdMap.get(sock.getHost()).add(cleanKeys[i]);
+
+ // return to pool
+ sock.close();
+ }
+
+ log.debug("multi get socket count : " + cmdMap.size());
+
+ // now query memcache
+ Map<String, Object> ret = new HashMap<String, Object>(keys.length);
+
+ // now use new NIO implementation
+ (new NIOLoader(this)).doMulti(asString, cmdMap, keys, ret);
+
+ // fix the return array in case we had to rewrite any of the keys
+ for (int i = 0; i < keys.length; ++i) {
+
+ // if key!=cleanKey and result has cleankey
+ if (!keys[i].equals(cleanKeys[i]) && ret.containsKey(cleanKeys[i])) {
+ ret.put(keys[i], ret.get(cleanKeys[i]));
+ ret.remove(cleanKeys[i]);
+ }
+
+ // backfill missing keys w/ null value
+ // if (!ret.containsKey(keys[i]))
+ // ret.put(keys[i], null);
+ }
+
+ log.debug("++++ memcache: got back " + ret.size() + " results");
+ return ret;
+ }
+
+ /**
+ * This method loads the data from cache into a Map.
+ *
+ * Pass a DataInputStream that is ready to read response data and a map
+ * to store the results in.
+ *
+ * @param input
+ * stream positioned at the start of the multi-get responses
+ * @param hm
+ * map to store data into
+ * @throws IOException
+ * if an io exception happens while reading from the socket
+ */
+ private void loadMulti(DataInputStream input, Map<String, Object> hm) throws IOException {
+
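+ // each hit comes back as a GETKQ response (header, flags extra, key, value);
+ // the trailing NOOP response sent at the end of the pipelined batch terminates the loop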
+ while (true) {
+ input.readByte();
+ byte opcode = input.readByte();
+ if (opcode == OPCODE_GETKQ) {
+ short keyLen = input.readShort();
+ input.readInt();
+ int length = input.readInt() - keyLen - 4;
+ input.readInt();
+ input.readLong();
+ int flag = input.readInt();
+ byte[] keyBuf = new byte[keyLen];
+ input.readFully(keyBuf);
+ String key = new String(keyBuf);
+
+ // read obj into buffer
+ byte[] buf = new byte[length];
+ input.readFully(buf);
+
+ // ready object
+ Object o = null;
+ // we can only take out serialized objects
+ if ((flag & F_COMPRESSED) == F_COMPRESSED) {
+ GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buf));
+ ByteArrayOutputStream bos = new ByteArrayOutputStream(buf.length);
+ int count;
+ byte[] tmp = new byte[2048];
+ while ((count = gzi.read(tmp)) != -1) {
+ bos.write(tmp, 0, count);
+ }
+ // store uncompressed back to buffer
+ buf = bos.toByteArray();
+ gzi.close();
+ }
+ if (flag != MARKER_OTHERS) {
+ // decoding object
+ try {
+ o = NativeHandler.decode(buf, flag);
+ } catch (Exception e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnGet(this, e, key);
+
+ log.error("++++ Exception thrown while trying to deserialize for key: " + key, e);
+ e.printStackTrace();
+ }
+ } else if (transCoder != null) {
+ o = transCoder.decode(new ByteArrayInputStream(buf));
+ }
+ // store the object into the cache
+ hm.put(key, o);
+ } else if (opcode == OPCODE_NOOP) {
+ break;
+ }
+ }
+ }
+
+ public boolean flushAll() {
+ return flushAll(null);
+ }
+
+ public boolean flushAll(String[] servers) {
+
+ // get SockIOPool instance
+ // return false if unable to get SockIO obj
+ if (pool == null) {
+ log.error("++++ unable to get SockIOPool instance");
+ return false;
+ }
+
+ // get all servers and iterate over them
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to flush");
+ return false;
+ }
+
+ boolean success = true;
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnFlush(this, new IOException("no socket to server available"));
+ log.error("++++ unable to get connection to : " + servers[i]);
+ success = false;
+ continue;
+ }
+
+ // build command
+ sock.writeBuf.clear();
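+ // flush_all request: bare 24-byte header with no key, extras or value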
+ sock.writeBuf.put(MAGIC_REQ);
+ sock.writeBuf.put(OPCODE_FLUSH);
+ sock.writeBuf.putShort((short) 0);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(0);
+ sock.writeBuf.putLong(0);
+ // write buffer to server
+
+ try {
+ sock.flush();
+ // if we get appropriate response back, then we return true
+ // get result code
+ DataInputStream dis = new DataInputStream(new SockInputStream(sock, Integer.MAX_VALUE));
+ dis.readInt();
+ dis.readShort();
+ success = (dis.readShort() == STAT_NO_ERROR) && success;
+ dis.close();
+ } catch (IOException e) {
+
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnFlush(this, e);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on flushAll");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ success = false;
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return success;
+ }
+
+ public Map<String, Map<String, String>> stats() {
+ return stats(null);
+ }
+
+ public Map<String, Map<String, String>> stats(String[] servers) {
+ return stats(servers, OPCODE_STAT, null);
+ }
+
+ public Map<String, Map<String, String>> statsItems() {
+ return statsItems(null);
+ }
+
+ public Map<String, Map<String, String>> statsItems(String[] servers) {
+ return stats(servers, OPCODE_STAT, "items".getBytes());
+ }
+
+ public Map<String, Map<String, String>> statsSlabs() {
+ return statsSlabs(null);
+ }
+
+ public Map<String, Map<String, String>> statsSlabs(String[] servers) {
+ return stats(servers, OPCODE_STAT, "slabs".getBytes());
+ }
+
+ public Map<String, Map<String, String>> statsCacheDump(int slabNumber, int limit) {
+ return statsCacheDump(null, slabNumber, limit);
+ }
+
+ public Map<String, Map<String, String>> statsCacheDump(String[] servers, int slabNumber, int limit) {
+ return stats(servers, OPCODE_STAT, String.format("cachedump %d %d", slabNumber, limit).getBytes());
+ }
+
+ private Map<String, Map<String, String>> stats(String[] servers, byte opcode, byte[] reqKey) {
+
+ // get all servers and iterate over them
+
+ servers = (servers == null) ? pool.getServers() : servers;
+
+ // if no servers, then return early
+ if (servers == null || servers.length <= 0) {
+ log.error("++++ no servers to check stats");
+ return null;
+ }
+
+ // array of stats Maps
+ Map<String, Map<String, String>> statsMaps = new HashMap<String, Map<String, String>>();
+
+ short statKeyLen;
+ int statValLen;
+ byte[] key;
+ byte[] value;
+
+ for (int i = 0; i < servers.length; i++) {
+
+ SchoonerSockIO sock = pool.getConnection(servers[i]);
+ if (sock == null) {
+ if (errorHandler != null)
+ errorHandler.handleErrorOnStats(this, new IOException("no socket to server available"));
+ continue;
+ }
+
+ try {
+ // map to hold key value pairs
+ Map<String, String> stats = new HashMap<String, String>();
+
+ // stat request
+ sock.writeBuf.clear();
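+ // stat request: the optional sub-key (e.g. "items", "slabs") is sent as the
+ // key; the server replies with one response per statistic and ends the
+ // stream with an empty-key response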
+ sock.writeBuf.put(MAGIC_REQ);
+ sock.writeBuf.put(opcode);
+ if (reqKey != null) {
+ sock.writeBuf.putShort((short) reqKey.length);
+ } else {
+ sock.writeBuf.putShort((short) 0x0000);
+ }
+ sock.writeBuf.put((byte) 0x00);
+ sock.writeBuf.put((byte) 0x00);
+ sock.writeBuf.putShort((short) 0x0000);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putInt(0);
+ sock.writeBuf.putLong(0);
+ if (reqKey != null) {
+ sock.writeBuf.put(reqKey);
+ }
+ sock.writeBuf.flip();
+ sock.getChannel().write(sock.writeBuf);
+
+ // response
+ DataInputStream input = new DataInputStream(new SockInputStream(sock, Integer.MAX_VALUE));
+ while (true) {
+ input.skip(2);
+ statKeyLen = input.readShort();
+ input.skip(4);
+ statValLen = input.readInt() - statKeyLen;
+ input.skip(12);
+ if (statKeyLen == 0)
+ break;
+ key = new byte[statKeyLen];
+ value = new byte[statValLen];
+ input.read(key);
+ input.read(value);
+ stats.put(new String(key), new String(value));
+ }
+ statsMaps.put(servers[i], stats);
+ input.close();
+ } catch (IOException e) {
+ // if we have an errorHandler, use its hook
+ if (errorHandler != null)
+ errorHandler.handleErrorOnStats(this, e);
+
+ // exception thrown
+ if (log.isErrorEnabled()) {
+ log.error("++++ exception thrown while writing bytes to server on stats");
+ log.error(e.getMessage(), e);
+ }
+
+ try {
+ sock.sockets.invalidateObject(sock);
+ } catch (Exception e1) {
+ log.error("++++ failed to close socket : " + sock.toString(), e1);
+ }
+
+ sock = null;
+ } finally {
+ if (sock != null) {
+ sock.close();
+ sock = null;
+ }
+ }
+ }
+
+ return statsMaps;
+ }
+
+ protected final class NIOLoader {
+ protected Selector selector;
+ protected int numConns = 0;
+ protected BinaryClient mc;
+ protected Connection[] conns;
+
+ public NIOLoader(BinaryClient mc) {
+ this.mc = mc;
+ }
+
+ private final class Connection {
+
+ public List<ByteBuffer> incoming = new ArrayList<ByteBuffer>();
+ public ByteBuffer outgoing;
+ public SchoonerSockIO sock;
+ public SocketChannel channel;
+ private boolean isDone = false;
+ private final byte[] NOOPFLAG = { MAGIC_RESP, OPCODE_NOOP, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
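+ // 24-byte NOOP response header; receipt of these bytes is presumably used to
+ // detect the end of a pipelined multi-get response from the server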
+
+ public Connection(SchoonerSockIO sock, ArrayList