/**
 * $RCSfile$
 * $Revision: $
 * $Date: $
 *
 * Copyright (C) 2005-2008 Jive Software. All rights reserved.
 *
 * This software is published under the terms of the GNU Public License (GPL),
 * a copy of which is included in this distribution, or a commercial license
 * agreement with Jive.
 */

package org.jivesoftware.openfire.container;

import java.util.Map;

/**
 * Configuration to use when creating caches. Caches can be used when running standalone or when
 * running in a cluster. When running in a cluster, a few extra parameters might be needed. Read
 * {@link #getParams()} for more information.
 *
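 * <p>A minimal usage sketch (the cache name, type, and property values below are illustrative
 * assumptions, not values mandated by this class):</p>
 * <pre>{@code
 * Map<String, String> params = new HashMap<String, String>();
 * params.put("back-size-high", "0");  // maximum size in bytes; zero means no limit
 * params.put("back-size-low", "0");   // size after a clean-up; zero means no limit
 * params.put("back-expiry", "10m");   // expire content after ten minutes; zero means never
 * CacheInfo info = new CacheInfo("Example Cache", CacheInfo.Type.optimistic, params);
 * }</pre>
 *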
 * @author Gaston Dombiak
 */
public class CacheInfo {
    /**
     * Name of the cache
     */
    private String cacheName;
    /**
     * Type of cache to use when running in a cluster. When not running in a cluster, this value is not used.
     */
    private Type type;
    /**
     * Map with the configuration of the cache. Openfire expects the following properties to exist:
     * <ul>
     *  <li><b>back-size-high</b> - Maximum size of the cache, in bytes. Zero means there is no limit.</li>
     *  <li><b>back-size-low</b> - Size in bytes of the cache after a clean-up. Use zero to place no limit.</li>
     *  <li><b>back-expiry</b> - Minutes, hours or days before content expires, e.g. 10m, 12h or 2d. Zero means never.</li>
     * </ul>
     */
    private Map<String, String> params;

    /**
     * Creates the configuration to use for the specified cache. Caches can be used when running
     * as a standalone application or when running in a cluster. When running in a cluster, a few
     * extra configuration parameters may be needed. Read {@link #getParams()} for more information.
     *
     * @param cacheName name of the cache.
     * @param type type of cache to use when running in a cluster. Ignored when running standalone.
     * @param params extra parameters that define cache properties like max size or expiration.
     */
    public CacheInfo(String cacheName, Type type, Map<String, String> params) {
        this.cacheName = cacheName;
        this.type = type;
        this.params = params;
    }

    public String getCacheName() {
        return cacheName;
    }

    public Type getType() {
        return type;
    }

    /**
     * Returns a map with the configuration to use for the cache. When running standalone, the following
     * properties are required:
     * <ul>
     *  <li><b>back-size-high</b> - Maximum size of the cache, in bytes. Zero means there is no limit.</li>
     *  <li><b>back-expiry</b> - Minutes, hours or days before content expires, e.g. 10m, 12h or 2d. Zero means never.</li>
     * </ul>
     * When running in a cluster, this extra property is also required. More properties may be defined depending on the
     * clustering solution being used.
     * <ul>
     *  <li><b>back-size-low</b> - Size in bytes of the cache after a clean-up. Use zero to place no limit.</li>
     * </ul>
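     * <p>A minimal sketch of reading these values, assuming a {@code CacheInfo} instance named
     * {@code cacheInfo}; the null handling shown is an illustrative assumption, not behavior
     * defined by this class:</p>
     * <pre>{@code
     * Map<String, String> params = cacheInfo.getParams();
     * String highSize = params.get("back-size-high");  // maximum size in bytes, "0" = unlimited
     * String expiry = params.get("back-expiry");       // e.g. "10m", "12h" or "2d", "0" = never
     * long maxBytes = highSize == null ? 0 : Long.parseLong(highSize);
     * }</pre>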
     *
     * @return map with the configuration to use for the cache.
     */
    public Map<String, String> getParams() {
        return params;
    }

    public enum Type {
        /**
         * Data is fully replicated to every member in the cluster. Offers the fastest read performance. Clustered,
         * fault-tolerant cache with linear performance scalability for reads, but poor scalability for writes
         * (as writes must be processed by every member in the cluster). Because data is replicated to all machines,
         * adding servers does not increase aggregate cache capacity.
         */
        replicated("replicated"),
        /**
         * OptimisticCache is a clustered cache implementation similar to the ReplicatedCache implementation, but
         * without any concurrency control. This implementation has the highest possible throughput. It also allows
         * the use of an alternative underlying store for the cached data (for example, a MRU/MFU-based cache). However,
         * if two cluster members are independently pruning or purging the underlying local stores, it is possible
         * that a cluster member may have a different store content than that held by another cluster member.
         * This cache is good for frequent reads and infrequent writes. However, this cache will not scale well
         * if it holds a lot of content, which may end up consuming all of the JVM memory. In that case a
         * {@link #distributed} cache is a better option.
         */
        optimistic("optimistic"),
        /**
         * A distributed-scheme defines caches where the storage for entries is partitioned across cluster nodes.
         * A hybrid cache; fronts a fault-tolerant, scalable partitioned cache with a local cache. The near cache
         * invalidates front-cache entries using a configurable invalidation strategy, and provides excellent
         * performance and synchronization. A near cache backed by a partitioned cache offers zero-millisecond local
         * access for repeated data access, while enabling concurrency and ensuring coherency and fail-over,
         * effectively combining the best attributes of replicated and partitioned caches.
         */
        distributed("near-distributed");

        private final String name;
        Type(String name) {
            this.name = name;
        }

        public static Type valueof(String name) {
            // Map the configured cache type name to the corresponding enum constant.
            if ("optimistic".equals(name)) {
                return optimistic;
            }
            if ("replicated".equals(name)) {
                return replicated;
            }
            // Default to the distributed (near) cache for any other value.
            return distributed;
        }

        public String getName() {
            return name;
        }
    }
}