An example of caching bitmaps with both an in-memory cache and a disk cache. It consists of a memory cache built on LruCache (BitmapCache), a disk cache wrapper built on DiskLruCache (BitmapDiskCache), a loader that ties the two together with a thread pool (BitmapLoader), the DiskLruCache source taken from AOSP, and bitmap decoding helpers (BitmapTools).

import android.graphics.Bitmap;
import android.util.LruCache;

public class BitmapCache {

    public static final String TAG = "debug";

    private LruCache<String, Bitmap> mBitmapCache;

    public BitmapCache() {
        // maxMemory() is the largest heap this app may use; exceeding it triggers OutOfMemoryError.
        // Give the LruCache 1/8 of that limit, measured in bytes.
        int maxSize = (int) (Runtime.getRuntime().maxMemory() / 8);
        mBitmapCache = new LruCache<String, Bitmap>(maxSize) {
            @Override
            protected int sizeOf(String key, Bitmap bitmap) {
                // Entry size in bytes, matching the unit used for maxSize.
                return bitmap.getByteCount();
            }
        };
    }

    public void add(String key, Bitmap value) {
        mBitmapCache.put(key, value);
    }

    public void remove(String key) {
        mBitmapCache.remove(key);
    }

    public Bitmap get(String key) {
        return mBitmapCache.get(key);
    }

    public boolean containsKey(String key) {
        return mBitmapCache.get(key) != null;
    }

    public static long toMB(long byteOfSize) {
        return byteOfSize >> 20;
    }
}
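
A minimal usage sketch for the memory cache above. The key, drawable resource, getResources() and imageView are hypothetical placeholders; in the post itself the cache is only used indirectly through BitmapLoader.

// Hypothetical call site (not part of the original classes).
BitmapCache memoryCache = new BitmapCache();
Bitmap decoded = BitmapFactory.decodeResource(getResources(), R.drawable.sample); // placeholder resource
memoryCache.add("sample-key", decoded);
if (memoryCache.containsKey("sample-key")) {
    imageView.setImageBitmap(memoryCache.get("sample-key"));
}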
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import android.util.Log;

public class BitmapDiskCache {

    private static final String TAG = "debug";
    private static final int DISK_CACHE_INDEX = 0;
    private static final int DISK_CACHE_COUNT = 1;
    private static final int VERSION = 1;

    private DiskLruCache mDiskLruCache;
    private DiskLruCache.Editor mEditor;

    public BitmapDiskCache(File diskCacheDir, long nDiskCacheSize) {
        if (!diskCacheDir.exists()) {
            diskCacheDir.mkdirs();
        }
        try {
            mDiskLruCache = DiskLruCache.open(diskCacheDir, VERSION, DISK_CACHE_COUNT, nDiskCacheSize);
        } catch (IOException e) {
            Log.e(TAG, "", e);
        }
    }

    public InputStream getInputStream(String key) {
        if (null != mDiskLruCache) {
            try {
                DiskLruCache.Snapshot snapshot = mDiskLruCache.get(key);
                if (null != snapshot) {
                    return snapshot.getInputStream(DISK_CACHE_INDEX);
                }
            } catch (IOException e) {
                Log.e(TAG, "", e);
            }
        }
        return null;
    }

    public OutputStream beginEdit(String key) throws IOException {
        if (null != mDiskLruCache) {
            mEditor = mDiskLruCache.edit(key);
            if (null != mEditor) {
                return mEditor.newOutputStream(DISK_CACHE_INDEX);
            }
        }
        return null;
    }

    public void endEdit(boolean isSuccess) throws IOException {
        if (null != mEditor) {
            if (isSuccess) {
                mEditor.commit();
            } else {
                mEditor.abort();
            }
            mEditor = null;
        }
    }

    public void flush() {
        if (null != mDiskLruCache) {
            try {
                mDiskLruCache.flush();
            } catch (IOException e) {
                Log.e(TAG, "", e);
            }
        }
    }
}
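
A minimal sketch of writing to and reading from the disk cache wrapper above. The context, directory, size, key and bitmap are placeholders, and the snippet assumes the surrounding method handles IOException and runs off the main thread; in the post this flow is driven by BitmapLoader.loadBitmapFromHttp().

// Hypothetical usage of BitmapDiskCache (names are placeholders).
File cacheDir = new File(context.getCacheDir(), "bmp");
BitmapDiskCache diskCache = new BitmapDiskCache(cacheDir, 50L * 1024 * 1024); // 50 MB

OutputStream os = diskCache.beginEdit("samplekey");
if (os != null) {
    boolean ok = bitmap.compress(Bitmap.CompressFormat.PNG, 100, os); // write the entry
    os.close();
    diskCache.endEdit(ok);
    diskCache.flush();
}

InputStream is = diskCache.getInputStream("samplekey");
if (is != null) {
    Bitmap cached = BitmapFactory.decodeStream(is);
    is.close();
}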
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.lang.ref.SoftReference;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import android.content.Context;
import android.graphics.Bitmap;
import android.os.Build;
import android.os.Build.VERSION_CODES;
import android.os.Environment;
import android.os.Handler;
import android.os.Looper;
import android.os.Message;
import android.os.StatFs;
import android.util.Log;
import android.widget.ImageView;

public class BitmapLoader {

    private static final String TAG = "debug";
    private static final long DISK_CACHE_SIZE = 1024 * 1024 * 50;
    private static final int CPU_COUNT = Runtime.getRuntime().availableProcessors();
private static final int CORE_POOL_SIZE = CPU_COUNT + 1;
private static final int MAX_MUM_POOL_SIZE = CPU_COUNT * 2 + 1;
    private static final long KEEP_ALIVE_TIME = 10;
    private static final int IO_BUFFER_SIZE = 1024 * 8;
    private static final int MESSAGE_POST_RESULT = 1000;

    private BitmapCache mBitmapCache;
    private BitmapDiskCache mBitmapDiskCache;

    private static ThreadFactory sThreadFactory = new ThreadFactory() {
        private AtomicInteger mCounter = new AtomicInteger(1);

        @Override
public Thread newThread(Runnable r) {
return new Thread(r, "BitmapLoader#" + mCounter.getAndIncrement());
}
    };

    private static Executor sThreadPoolExecutor = new ThreadPoolExecutor(CORE_POOL_SIZE, MAX_MUM_POOL_SIZE,
            KEEP_ALIVE_TIME, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(), sThreadFactory);

    class LoaderResult {
SoftReference<ImageView> mImageViewRef;
Bitmap mBitmap;
        String mUri;

        public LoaderResult(ImageView imageView, String uri, Bitmap bitmap) {
mImageViewRef = new SoftReference<ImageView>(imageView);
mUri = uri;
mBitmap = bitmap;
}
    }

    private Handler mMainHandler = new Handler(Looper.getMainLooper()) {
@Override
public void handleMessage(Message msg) {
LoaderResult result = (LoaderResult) msg.obj;
ImageView imageView = result.mImageViewRef.get();
if (null != imageView) {
                // Only set the bitmap if the ImageView is still bound to the same URI;
                // otherwise a recycled view would briefly show a stale image.
                String uri = (String) imageView.getTag();
                if (result.mUri.equals(uri)) {
                    imageView.setImageBitmap(result.mBitmap);
} else {
Log.w(TAG, "set image bitmap, but url has changed, ignored!");
}
} else {
Log.w(TAG, "set image bitmap, ImageView released, ignored!");
}
        }
    };

    public BitmapLoader(Context context) {
        mBitmapCache = new BitmapCache();
        File diskCacheDir = new File(getDiskCacheDir(context, "bmp"));
if (!diskCacheDir.exists()) {
diskCacheDir.mkdirs();
}
if (getUsableSpace(diskCacheDir) > DISK_CACHE_SIZE) {
mBitmapDiskCache = new BitmapDiskCache(diskCacheDir, DISK_CACHE_SIZE);
}
    }

    public void bindBitmap(final String uri, final ImageView imageView, final int reqWidth, final int reqHeight) {
String key = hashKeyFormUrl(uri);
Bitmap bitmap = mBitmapCache.get(key);
if (null != bitmap) {
Log.d(TAG, "load from memory cache on bind");
imageView.setImageBitmap(bitmap);
return;
        }

        imageView.setTag(uri);
Runnable loadBitmapTask = new Runnable() {
@Override
public void run() {
Log.d(TAG, Thread.currentThread().getName() + " run");
Bitmap bitmap = loadBitmap(uri, reqWidth, reqHeight);
if (bitmap != null) {
LoaderResult result = new LoaderResult(imageView, uri, bitmap);
mMainHandler.obtainMessage(MESSAGE_POST_RESULT, result).sendToTarget();
}
}
};
sThreadPoolExecutor.execute(loadBitmapTask);
    }

    public Bitmap loadBitmap(String uri, int reqWidth, int reqHeight) {
String key = hashKeyFormUrl(uri);
Bitmap bitmap = mBitmapCache.get(key);
if (null != bitmap) {
Log.d(TAG, "load from memory cache");
return bitmap;
        }

        try {
bitmap = loadBitmapFromDiskCache(key, reqWidth, reqHeight);
if (null != bitmap) {
Log.d(TAG, "load from disk cache");
return bitmap;
}
bitmap = loadBitmapFromHttp(uri, reqWidth, reqHeight);
if (null != bitmap) {
Log.d(TAG, "load from http");
}
} catch (IOException e) {
Log.e(TAG, "", e);
        }

        if (null == bitmap) {
bitmap = downloadBitmapFromUrl(uri, reqWidth, reqHeight);
if (null != bitmap) {
mBitmapCache.add(key, bitmap);
Log.d(TAG, "load from url");
}
        }

        return bitmap;
    }

    private Bitmap loadBitmapFromDiskCache(String key, int reqWidth, int reqHeight) throws IOException {
if (Looper.myLooper() == Looper.getMainLooper()) {
Log.w(TAG, "load bitmap from UI Thread, it's not recommended!");
}
if (mBitmapDiskCache == null) {
return null;
        }

        Bitmap bitmap = null;
InputStream is = mBitmapDiskCache.getInputStream(key);
if (null != is) {
bitmap = BitmapTools.decodeSampledBitmapFromInputStream(is, reqWidth, reqHeight);
if (bitmap != null) {
mBitmapCache.add(key, bitmap);
}
}
return bitmap;
    }

    private Bitmap loadBitmapFromHttp(String url, int reqWidth, int reqHeight) throws IOException {
if (Looper.myLooper() == Looper.getMainLooper()) {
throw new RuntimeException("can not visit network from UI Thread.");
}
if (mBitmapDiskCache == null) {
return null;
        }

        String key = hashKeyFormUrl(url);
OutputStream os = mBitmapDiskCache.beginEdit(key);
if (null != os) {
if (downloadUrlToStream(url, os)) {
mBitmapDiskCache.endEdit(true);
} else {
mBitmapDiskCache.endEdit(false);
}
mBitmapDiskCache.flush();
}
return loadBitmapFromDiskCache(key, reqWidth, reqHeight);
    }

    private Bitmap downloadBitmapFromUrl(String urlString, int reqWidth, int reqHeight) {
HttpURLConnection urlConnection = null;
BufferedInputStream in = null;
try {
final URL url = new URL(urlString);
urlConnection = (HttpURLConnection) url.openConnection();
in = new BufferedInputStream(urlConnection.getInputStream(), IO_BUFFER_SIZE);
return BitmapTools.decodeSampledBitmapFromInputStream(in, reqWidth, reqHeight);
} catch (IOException e) {
Log.e(TAG, "Error in downloadBitmap: ", e);
} finally {
if (urlConnection != null) {
urlConnection.disconnect();
}
close(in);
}
return null;
    }

    private boolean downloadUrlToStream(String urlString, OutputStream outputStream) {
HttpURLConnection urlConnection = null;
BufferedOutputStream out = null;
        BufferedInputStream in = null;
        try {
final URL url = new URL(urlString);
urlConnection = (HttpURLConnection) url.openConnection();
in = new BufferedInputStream(urlConnection.getInputStream(), IO_BUFFER_SIZE);
            out = new BufferedOutputStream(outputStream, IO_BUFFER_SIZE);
            int b;
while ((b = in.read()) != -1) {
out.write(b);
}
return true;
} catch (IOException e) {
Log.e(TAG, "downloadBitmap failed.", e);
} finally {
if (urlConnection != null) {
urlConnection.disconnect();
}
close(out);
close(in);
}
return false;
    }

    private void close(Closeable obj) {
if (null != obj) {
try {
obj.close();
} catch (IOException e) {
Log.e(TAG, "", e);
}
}
    }

    private String hashKeyFormUrl(String url) {
String cacheKey;
try {
final MessageDigest md5Digest = MessageDigest.getInstance("MD5");
md5Digest.update(url.getBytes());
cacheKey = bytesToHexString(md5Digest.digest());
} catch (NoSuchAlgorithmException e) {
cacheKey = String.valueOf(url.hashCode());
}
return cacheKey;
    }

    private String bytesToHexString(byte[] bytes) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < bytes.length; i++) {
String hex = Integer.toHexString(0xFF & bytes[i]);
if (hex.length() == 1) {
sb.append('0');
}
sb.append(hex);
}
return sb.toString();
    }

    private String getDiskCacheDir(Context context, String uniqueName) {
boolean isAvailable = Environment.getExternalStorageState().equals(Environment.MEDIA_MOUNTED);
final String cachePath;
if (isAvailable) {
cachePath = context.getExternalCacheDir().getPath();
} else {
cachePath = context.getCacheDir().getPath();
        }

        return cachePath + File.separator + uniqueName;
    }

    private long getUsableSpace(File path) {
if (Build.VERSION.SDK_INT >= VERSION_CODES.GINGERBREAD) {
return path.getUsableSpace();
}
final StatFs stats = new StatFs(path.getPath());
return stats.getBlockSizeLong() * stats.getAvailableBlocksLong();
}
}
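
A short call-site sketch for the loader, for example from an adapter's getView(); the context, URL, target size and ImageView are hypothetical placeholders.

// Hypothetical call site: bindBitmap() checks the memory cache on the calling (UI) thread
// and falls back to the disk cache / network on the internal thread pool.
BitmapLoader loader = new BitmapLoader(context);
loader.bindBitmap("http://example.com/sample.png", imageView, 200, 200);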
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
 */

package com.bitmap;

import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Reader;
import java.io.StringWriter;
import java.io.Writer;
import java.lang.reflect.Array;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
******************************************************************************
* Taken from the JB source code, can be found in:
* libcore/luni/src/main/java/libcore/io/DiskLruCache.java
* or direct link:
* https://android.googlesource.com/platform/libcore/+/android-4.1.1_r1/luni/src/main/java/libcore/io/DiskLruCache.java
******************************************************************************
*
* A cache that uses a bounded amount of space on a filesystem. Each cache
* entry has a string key and a fixed number of values. Values are byte
* sequences, accessible as streams or files. Each value must be between {@code
* 0} and {@code Integer.MAX_VALUE} bytes in length.
*
* <p>The cache stores its data in a directory on the filesystem. This
* directory must be exclusive to the cache; the cache may delete or overwrite
* files from its directory. It is an error for multiple processes to use the
* same cache directory at the same time.
*
* <p>This cache limits the number of bytes that it will store on the
* filesystem. When the number of stored bytes exceeds the limit, the cache will
* remove entries in the background until the limit is satisfied. The limit is
* not strict: the cache may temporarily exceed it while waiting for files to be
* deleted. The limit does not include filesystem overhead or the cache
* journal so space-sensitive applications should set a conservative limit.
*
* <p>Clients call {@link #edit} to create or update the values of an entry. An
* entry may have only one editor at one time; if a value is not available to be
* edited then {@link #edit} will return null.
* <ul>
* <li>When an entry is being <strong>created</strong> it is necessary to
* supply a full set of values; the empty value should be used as a
* placeholder if necessary.
* <li>When an entry is being <strong>edited</strong>, it is not necessary
* to supply data for every value; values default to their previous
* value.
* </ul>
* Every {@link #edit} call must be matched by a call to {@link Editor#commit}
* or {@link Editor#abort}. Committing is atomic: a read observes the full set
* of values as they were before or after the commit, but never a mix of values.
*
* <p>Clients call {@link #get} to read a snapshot of an entry. The read will
* observe the value at the time that {@link #get} was called. Updates and
* removals after the call do not impact ongoing reads.
*
* <p>This class is tolerant of some I/O errors. If files are missing from the
* filesystem, the corresponding entries will be dropped from the cache. If
* an error occurs while writing a cache value, the edit will fail silently.
* Callers should handle other problems by catching {@code IOException} and
* responding appropriately.
*/
public final class DiskLruCache implements Closeable {
static final String JOURNAL_FILE = "journal";
static final String JOURNAL_FILE_TMP = "journal.tmp";
static final String MAGIC = "libcore.io.DiskLruCache";
static final String VERSION_1 = "1";
static final long ANY_SEQUENCE_NUMBER = -1;
private static final String CLEAN = "CLEAN";
private static final String DIRTY = "DIRTY";
private static final String REMOVE = "REMOVE";
    private static final String READ = "READ";
    private static final Charset UTF_8 = Charset.forName("UTF-8");
    private static final int IO_BUFFER_SIZE = 8 * 1024;

    /*
* This cache uses a journal file named "journal". A typical journal file
* looks like this:
* libcore.io.DiskLruCache
* 1
* 100
* 2
*
* CLEAN 3400330d1dfc7f3f7f4b8d4d803dfcf6 832 21054
* DIRTY 335c4c6028171cfddfbaae1a9c313c52
* CLEAN 335c4c6028171cfddfbaae1a9c313c52 3934 2342
* REMOVE 335c4c6028171cfddfbaae1a9c313c52
* DIRTY 1ab96a171faeeee38496d8b330771a7a
* CLEAN 1ab96a171faeeee38496d8b330771a7a 1600 234
* READ 335c4c6028171cfddfbaae1a9c313c52
* READ 3400330d1dfc7f3f7f4b8d4d803dfcf6
*
* The first five lines of the journal form its header. They are the
* constant string "libcore.io.DiskLruCache", the disk cache's version,
* the application's version, the value count, and a blank line.
*
* Each of the subsequent lines in the file is a record of the state of a
* cache entry. Each line contains space-separated values: a state, a key,
* and optional state-specific values.
* o DIRTY lines track that an entry is actively being created or updated.
* Every successful DIRTY action should be followed by a CLEAN or REMOVE
* action. DIRTY lines without a matching CLEAN or REMOVE indicate that
* temporary files may need to be deleted.
* o CLEAN lines track a cache entry that has been successfully published
* and may be read. A publish line is followed by the lengths of each of
* its values.
* o READ lines track accesses for LRU.
* o REMOVE lines track entries that have been deleted.
*
* The journal file is appended to as cache operations occur. The journal may
* occasionally be compacted by dropping redundant lines. A temporary file named
* "journal.tmp" will be used during compaction; that file should be deleted if
* it exists when the cache is opened.
     */

    private final File directory;
private final File journalFile;
private final File journalFileTmp;
private final int appVersion;
private final long maxSize;
private final int valueCount;
private long size = 0;
private Writer journalWriter;
private final LinkedHashMap<String, Entry> lruEntries
= new LinkedHashMap<String, Entry>(0, 0.75f, true);
    private int redundantOpCount;

    /**
* To differentiate between old and current snapshots, each entry is given
* a sequence number each time an edit is committed. A snapshot is stale if
* its sequence number is not equal to its entry's sequence number.
*/
    private long nextSequenceNumber = 0;

    /* From java.util.Arrays */
@SuppressWarnings("unchecked")
private static <T> T[] copyOfRange(T[] original, int start, int end) {
final int originalLength = original.length; // For exception priority compatibility.
if (start > end) {
throw new IllegalArgumentException();
}
if (start < 0 || start > originalLength) {
throw new ArrayIndexOutOfBoundsException();
}
final int resultLength = end - start;
final int copyLength = Math.min(resultLength, originalLength - start);
final T[] result = (T[]) Array
.newInstance(original.getClass().getComponentType(), resultLength);
System.arraycopy(original, start, result, 0, copyLength);
return result;
    }

    /**
* Returns the remainder of 'reader' as a string, closing it when done.
*/
public static String readFully(Reader reader) throws IOException {
try {
StringWriter writer = new StringWriter();
char[] buffer = new char[1024];
int count;
while ((count = reader.read(buffer)) != -1) {
writer.write(buffer, 0, count);
}
return writer.toString();
} finally {
reader.close();
}
    }

    /**
* Returns the ASCII characters up to but not including the next "\r\n", or
* "\n".
*
* @throws java.io.EOFException if the stream is exhausted before the next newline
* character.
*/
public static String readAsciiLine(InputStream in) throws IOException {
        // TODO: support UTF-8 here instead
        StringBuilder result = new StringBuilder(80);
while (true) {
int c = in.read();
if (c == -1) {
throw new EOFException();
} else if (c == '\n') {
break;
            }
            result.append((char) c);
}
int length = result.length();
if (length > 0 && result.charAt(length - 1) == '\r') {
result.setLength(length - 1);
}
return result.toString();
    }

    /**
* Closes 'closeable', ignoring any checked exceptions. Does nothing if 'closeable' is null.
*/
public static void closeQuietly(Closeable closeable) {
if (closeable != null) {
try {
closeable.close();
} catch (RuntimeException rethrown) {
throw rethrown;
} catch (Exception ignored) {
}
}
    }

    /**
* Recursively delete everything in {@code dir}.
*/
// TODO: this should specify paths as Strings rather than as Files
public static void deleteContents(File dir) throws IOException {
File[] files = dir.listFiles();
if (files == null) {
throw new IllegalArgumentException("not a directory: " + dir);
}
for (File file : files) {
if (file.isDirectory()) {
deleteContents(file);
}
if (!file.delete()) {
throw new IOException("failed to delete file: " + file);
}
}
    }

    /** This cache uses a single background thread to evict entries. */
private final ExecutorService executorService = new ThreadPoolExecutor(0, 1,
60L, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
private final Callable<Void> cleanupCallable = new Callable<Void>() {
@Override public Void call() throws Exception {
synchronized (DiskLruCache.this) {
if (journalWriter == null) {
return null; // closed
}
trimToSize();
if (journalRebuildRequired()) {
rebuildJournal();
redundantOpCount = 0;
}
}
return null;
}
    };

    private DiskLruCache(File directory, int appVersion, int valueCount, long maxSize) {
this.directory = directory;
this.appVersion = appVersion;
this.journalFile = new File(directory, JOURNAL_FILE);
this.journalFileTmp = new File(directory, JOURNAL_FILE_TMP);
this.valueCount = valueCount;
this.maxSize = maxSize;
    }

    /**
* Opens the cache in {@code directory}, creating a cache if none exists
* there.
*
* @param directory a writable directory
* @param appVersion
* @param valueCount the number of values per cache entry. Must be positive.
* @param maxSize the maximum number of bytes this cache should use to store
* @throws java.io.IOException if reading or writing the cache directory fails
*/
public static DiskLruCache open(File directory, int appVersion, int valueCount, long maxSize)
throws IOException {
if (maxSize <= 0) {
throw new IllegalArgumentException("maxSize <= 0");
}
if (valueCount <= 0) {
throw new IllegalArgumentException("valueCount <= 0");
        }

        // prefer to pick up where we left off
DiskLruCache cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
if (cache.journalFile.exists()) {
try {
cache.readJournal();
cache.processJournal();
cache.journalWriter = new BufferedWriter(new FileWriter(cache.journalFile, true),
IO_BUFFER_SIZE);
return cache;
} catch (IOException journalIsCorrupt) {
// System.logW("DiskLruCache " + directory + " is corrupt: "
// + journalIsCorrupt.getMessage() + ", removing");
cache.delete();
}
        }

        // create a new empty cache
directory.mkdirs();
cache = new DiskLruCache(directory, appVersion, valueCount, maxSize);
cache.rebuildJournal();
return cache;
    }

    private void readJournal() throws IOException {
InputStream in = new BufferedInputStream(new FileInputStream(journalFile), IO_BUFFER_SIZE);
try {
String magic = readAsciiLine(in);
String version = readAsciiLine(in);
String appVersionString = readAsciiLine(in);
String valueCountString = readAsciiLine(in);
String blank = readAsciiLine(in);
if (!MAGIC.equals(magic)
|| !VERSION_1.equals(version)
|| !Integer.toString(appVersion).equals(appVersionString)
|| !Integer.toString(valueCount).equals(valueCountString)
|| !"".equals(blank)) {
throw new IOException("unexpected journal header: ["
+ magic + ", " + version + ", " + valueCountString + ", " + blank + "]");
            }

            while (true) {
try {
readJournalLine(readAsciiLine(in));
} catch (EOFException endOfJournal) {
break;
}
}
} finally {
closeQuietly(in);
}
    }

    private void readJournalLine(String line) throws IOException {
String[] parts = line.split(" ");
if (parts.length < 2) {
throw new IOException("unexpected journal line: " + line);
        }

        String key = parts[1];
if (parts[0].equals(REMOVE) && parts.length == 2) {
lruEntries.remove(key);
return;
        }

        Entry entry = lruEntries.get(key);
if (entry == null) {
entry = new Entry(key);
lruEntries.put(key, entry);
        }

        if (parts[0].equals(CLEAN) && parts.length == 2 + valueCount) {
entry.readable = true;
entry.currentEditor = null;
entry.setLengths(copyOfRange(parts, 2, parts.length));
} else if (parts[0].equals(DIRTY) && parts.length == 2) {
entry.currentEditor = new Editor(entry);
} else if (parts[0].equals(READ) && parts.length == 2) {
// this work was already done by calling lruEntries.get()
} else {
throw new IOException("unexpected journal line: " + line);
}
    }

    /**
* Computes the initial size and collects garbage as a part of opening the
* cache. Dirty entries are assumed to be inconsistent and will be deleted.
*/
private void processJournal() throws IOException {
deleteIfExists(journalFileTmp);
for (Iterator<Entry> i = lruEntries.values().iterator(); i.hasNext(); ) {
Entry entry = i.next();
if (entry.currentEditor == null) {
for (int t = 0; t < valueCount; t++) {
size += entry.lengths[t];
}
} else {
entry.currentEditor = null;
for (int t = 0; t < valueCount; t++) {
deleteIfExists(entry.getCleanFile(t));
deleteIfExists(entry.getDirtyFile(t));
}
i.remove();
}
}
    }

    /**
* Creates a new journal that omits redundant information. This replaces the
* current journal if it exists.
*/
private synchronized void rebuildJournal() throws IOException {
if (journalWriter != null) {
journalWriter.close();
        }

        Writer writer = new BufferedWriter(new FileWriter(journalFileTmp), IO_BUFFER_SIZE);
writer.write(MAGIC);
writer.write("\n");
writer.write(VERSION_1);
writer.write("\n");
writer.write(Integer.toString(appVersion));
writer.write("\n");
writer.write(Integer.toString(valueCount));
writer.write("\n");
writer.write("\n"); for (Entry entry : lruEntries.values()) {
if (entry.currentEditor != null) {
writer.write(DIRTY + ' ' + entry.key + '\n');
} else {
writer.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
}
        }

        writer.close();
journalFileTmp.renameTo(journalFile);
journalWriter = new BufferedWriter(new FileWriter(journalFile, true), IO_BUFFER_SIZE);
    }

    private static void deleteIfExists(File file) throws IOException {
// try {
// Libcore.os.remove(file.getPath());
// } catch (ErrnoException errnoException) {
// if (errnoException.errno != OsConstants.ENOENT) {
// throw errnoException.rethrowAsIOException();
// }
// }
if (file.exists() && !file.delete()) {
throw new IOException();
}
    }

    /**
* Returns a snapshot of the entry named {@code key}, or null if it doesn't
* exist is not currently readable. If a value is returned, it is moved to
* the head of the LRU queue.
*/
public synchronized Snapshot get(String key) throws IOException {
checkNotClosed();
validateKey(key);
Entry entry = lruEntries.get(key);
if (entry == null) {
return null;
        }

        if (!entry.readable) {
return null;
        }

        /*
* Open all streams eagerly to guarantee that we see a single published
* snapshot. If we opened streams lazily then the streams could come
* from different edits.
*/
InputStream[] ins = new InputStream[valueCount];
try {
for (int i = 0; i < valueCount; i++) {
ins[i] = new FileInputStream(entry.getCleanFile(i));
}
} catch (FileNotFoundException e) {
// a file must have been deleted manually!
return null;
        }

        redundantOpCount++;
journalWriter.append(READ + ' ' + key + '\n');
if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
        }

        return new Snapshot(key, entry.sequenceNumber, ins);
    }

    /**
* Returns an editor for the entry named {@code key}, or null if another
* edit is in progress.
*/
public Editor edit(String key) throws IOException {
return edit(key, ANY_SEQUENCE_NUMBER);
    }

    private synchronized Editor edit(String key, long expectedSequenceNumber) throws IOException {
checkNotClosed();
validateKey(key);
Entry entry = lruEntries.get(key);
if (expectedSequenceNumber != ANY_SEQUENCE_NUMBER
&& (entry == null || entry.sequenceNumber != expectedSequenceNumber)) {
return null; // snapshot is stale
}
if (entry == null) {
entry = new Entry(key);
lruEntries.put(key, entry);
} else if (entry.currentEditor != null) {
return null; // another edit is in progress
        }

        Editor editor = new Editor(entry);
        entry.currentEditor = editor;

        // flush the journal before creating files to prevent file leaks
journalWriter.write(DIRTY + ' ' + key + '\n');
journalWriter.flush();
return editor;
    }

    /**
* Returns the directory where this cache stores its data.
*/
public File getDirectory() {
return directory;
    }

    /**
* Returns the maximum number of bytes that this cache should use to store
* its data.
*/
public long maxSize() {
return maxSize;
    }

    /**
* Returns the number of bytes currently being used to store the values in
* this cache. This may be greater than the max size if a background
* deletion is pending.
*/
public synchronized long size() {
return size;
    }

    private synchronized void completeEdit(Editor editor, boolean success) throws IOException {
Entry entry = editor.entry;
if (entry.currentEditor != editor) {
throw new IllegalStateException();
        }

        // if this edit is creating the entry for the first time, every index must have a value
if (success && !entry.readable) {
for (int i = 0; i < valueCount; i++) {
if (!entry.getDirtyFile(i).exists()) {
editor.abort();
throw new IllegalStateException("edit didn't create file " + i);
}
}
        }

        for (int i = 0; i < valueCount; i++) {
File dirty = entry.getDirtyFile(i);
if (success) {
if (dirty.exists()) {
File clean = entry.getCleanFile(i);
dirty.renameTo(clean);
long oldLength = entry.lengths[i];
long newLength = clean.length();
entry.lengths[i] = newLength;
size = size - oldLength + newLength;
}
} else {
deleteIfExists(dirty);
}
        }

        redundantOpCount++;
entry.currentEditor = null;
if (entry.readable | success) {
entry.readable = true;
journalWriter.write(CLEAN + ' ' + entry.key + entry.getLengths() + '\n');
if (success) {
entry.sequenceNumber = nextSequenceNumber++;
}
} else {
lruEntries.remove(entry.key);
journalWriter.write(REMOVE + ' ' + entry.key + '\n');
        }

        if (size > maxSize || journalRebuildRequired()) {
executorService.submit(cleanupCallable);
}
    }

    /**
* We only rebuild the journal when it will halve the size of the journal
* and eliminate at least 2000 ops.
*/
private boolean journalRebuildRequired() {
final int REDUNDANT_OP_COMPACT_THRESHOLD = 2000;
return redundantOpCount >= REDUNDANT_OP_COMPACT_THRESHOLD
&& redundantOpCount >= lruEntries.size();
    }

    /**
* Drops the entry for {@code key} if it exists and can be removed. Entries
* actively being edited cannot be removed.
*
* @return true if an entry was removed.
*/
public synchronized boolean remove(String key) throws IOException {
checkNotClosed();
validateKey(key);
Entry entry = lruEntries.get(key);
if (entry == null || entry.currentEditor != null) {
return false;
        }

        for (int i = 0; i < valueCount; i++) {
File file = entry.getCleanFile(i);
if (!file.delete()) {
throw new IOException("failed to delete " + file);
}
size -= entry.lengths[i];
entry.lengths[i] = 0;
        }

        redundantOpCount++;
journalWriter.append(REMOVE + ' ' + key + '\n');
        lruEntries.remove(key);

        if (journalRebuildRequired()) {
executorService.submit(cleanupCallable);
        }

        return true;
    }

    /**
* Returns true if this cache has been closed.
*/
public boolean isClosed() {
return journalWriter == null;
    }

    private void checkNotClosed() {
if (journalWriter == null) {
throw new IllegalStateException("cache is closed");
}
    }

    /**
* Force buffered operations to the filesystem.
*/
public synchronized void flush() throws IOException {
checkNotClosed();
trimToSize();
journalWriter.flush();
    }

    /**
* Closes this cache. Stored values will remain on the filesystem.
*/
public synchronized void close() throws IOException {
if (journalWriter == null) {
return; // already closed
}
for (Entry entry : new ArrayList<Entry>(lruEntries.values())) {
if (entry.currentEditor != null) {
entry.currentEditor.abort();
}
}
trimToSize();
journalWriter.close();
journalWriter = null;
    }

    private void trimToSize() throws IOException {
while (size > maxSize) {
// Map.Entry<String, Entry> toEvict = lruEntries.eldest();
final Map.Entry<String, Entry> toEvict = lruEntries.entrySet().iterator().next();
remove(toEvict.getKey());
}
    }

    /**
* Closes the cache and deletes all of its stored values. This will delete
* all files in the cache directory including files that weren't created by
* the cache.
*/
public void delete() throws IOException {
close();
deleteContents(directory);
    }

    private void validateKey(String key) {
if (key.contains(" ") || key.contains("\n") || key.contains("\r")) {
throw new IllegalArgumentException(
"keys must not contain spaces or newlines: \"" + key + "\"");
}
    }

    private static String inputStreamToString(InputStream in) throws IOException {
return readFully(new InputStreamReader(in, UTF_8));
    }

    /**
* A snapshot of the values for an entry.
*/
public final class Snapshot implements Closeable {
private final String key;
private final long sequenceNumber;
        private final InputStream[] ins;

        private Snapshot(String key, long sequenceNumber, InputStream[] ins) {
this.key = key;
this.sequenceNumber = sequenceNumber;
this.ins = ins;
        }

        /**
* Returns an editor for this snapshot's entry, or null if either the
* entry has changed since this snapshot was created or if another edit
* is in progress.
*/
public Editor edit() throws IOException {
return DiskLruCache.this.edit(key, sequenceNumber);
        }

        /**
* Returns the unbuffered stream with the value for {@code index}.
*/
public InputStream getInputStream(int index) {
return ins[index];
        }

        /**
* Returns the string value for {@code index}.
*/
public String getString(int index) throws IOException {
return inputStreamToString(getInputStream(index));
        }

        @Override
        public void close() {
for (InputStream in : ins) {
closeQuietly(in);
}
}
    }

    /**
* Edits the values for an entry.
*/
public final class Editor {
private final Entry entry;
        private boolean hasErrors;

        private Editor(Entry entry) {
this.entry = entry;
        }

        /**
* Returns an unbuffered input stream to read the last committed value,
* or null if no value has been committed.
*/
public InputStream newInputStream(int index) throws IOException {
synchronized (DiskLruCache.this) {
if (entry.currentEditor != this) {
throw new IllegalStateException();
}
if (!entry.readable) {
return null;
}
return new FileInputStream(entry.getCleanFile(index));
}
        }

        /**
* Returns the last committed value as a string, or null if no value
* has been committed.
*/
public String getString(int index) throws IOException {
InputStream in = newInputStream(index);
return in != null ? inputStreamToString(in) : null;
        }

        /**
* Returns a new unbuffered output stream to write the value at
* {@code index}. If the underlying output stream encounters errors
* when writing to the filesystem, this edit will be aborted when
* {@link #commit} is called. The returned output stream does not throw
* IOExceptions.
*/
public OutputStream newOutputStream(int index) throws IOException {
synchronized (DiskLruCache.this) {
if (entry.currentEditor != this) {
throw new IllegalStateException();
}
return new FaultHidingOutputStream(new FileOutputStream(entry.getDirtyFile(index)));
}
        }

        /**
* Sets the value at {@code index} to {@code value}.
*/
public void set(int index, String value) throws IOException {
Writer writer = null;
try {
writer = new OutputStreamWriter(newOutputStream(index), UTF_8);
writer.write(value);
} finally {
closeQuietly(writer);
}
        }

        /**
* Commits this edit so it is visible to readers. This releases the
* edit lock so another edit may be started on the same key.
*/
public void commit() throws IOException {
if (hasErrors) {
completeEdit(this, false);
remove(entry.key); // the previous entry is stale
} else {
completeEdit(this, true);
}
        }

        /**
* Aborts this edit. This releases the edit lock so another edit may be
* started on the same key.
*/
public void abort() throws IOException {
completeEdit(this, false);
        }

        private class FaultHidingOutputStream extends FilterOutputStream {
private FaultHidingOutputStream(OutputStream out) {
super(out);
            }

            @Override
            public void write(int oneByte) {
try {
out.write(oneByte);
} catch (IOException e) {
hasErrors = true;
}
            }

            @Override
            public void write(byte[] buffer, int offset, int length) {
try {
out.write(buffer, offset, length);
} catch (IOException e) {
hasErrors = true;
}
            }

            @Override
            public void close() {
try {
out.close();
} catch (IOException e) {
hasErrors = true;
}
            }

            @Override
            public void flush() {
try {
out.flush();
} catch (IOException e) {
hasErrors = true;
}
}
}
    }

    private final class Entry {
        private final String key;

        /** Lengths of this entry's files. */
        private final long[] lengths;

        /** True if this entry has ever been published */
        private boolean readable;

        /** The ongoing edit or null if this entry is not being edited. */
        private Editor currentEditor;

        /** The sequence number of the most recently committed edit to this entry. */
        private long sequenceNumber;

        private Entry(String key) {
this.key = key;
this.lengths = new long[valueCount];
        }

        public String getLengths() throws IOException {
StringBuilder result = new StringBuilder();
for (long size : lengths) {
result.append(' ').append(size);
}
return result.toString();
        }

        /**
* Set lengths using decimal numbers like "10123".
*/
private void setLengths(String[] strings) throws IOException {
if (strings.length != valueCount) {
throw invalidLengths(strings);
            }

            try {
for (int i = 0; i < strings.length; i++) {
lengths[i] = Long.parseLong(strings[i]);
}
} catch (NumberFormatException e) {
throw invalidLengths(strings);
}
        }

        private IOException invalidLengths(String[] strings) throws IOException {
throw new IOException("unexpected journal line: " + Arrays.toString(strings));
        }

        public File getCleanFile(int i) {
return new File(directory, key + "." + i);
        }

        public File getDirtyFile(int i) {
return new File(directory, key + "." + i + ".tmp");
}
}
}
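
For reference, a minimal sketch of driving DiskLruCache directly rather than through the BitmapDiskCache wrapper. The directory, app version, key and value are placeholders, and the snippet assumes the caller handles IOException.

// Hypothetical direct usage of DiskLruCache (valueCount = 1, so only index 0 is used).
DiskLruCache cache = DiskLruCache.open(new File(context.getCacheDir(), "raw"), 1, 1, 10L * 1024 * 1024);

DiskLruCache.Editor editor = cache.edit("abc123");
if (editor != null) {               // null means another edit is already in progress
    editor.set(0, "hello");         // write value 0 as a UTF-8 string
    editor.commit();
}

DiskLruCache.Snapshot snapshot = cache.get("abc123");
if (snapshot != null) {
    String value = snapshot.getString(0);
    snapshot.close();
}
cache.close();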
import java.io.InputStream;

import android.content.res.Resources;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.util.Log;

public class BitmapTools {

    private static final String TAG = "debug";

    public static class Size {

        public Size(int width, int height) {
this.width = width;
this.height = height;
        }

        public void resize(int width, int height) {
this.width = width;
this.height = height;
        }

        public int getWidth() {
return width;
        }

        public int getHeight() {
return height;
        }

        @Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
                return false;

            Size size = (Size) o;

            if (width != size.width)
return false;
return height == size.height;
        }

        @Override
public int hashCode() {
int result = width;
result = 31 * result + height;
return result;
        }

        @Override
public String toString() {
return "Size{" + "width=" + width + ", height=" + height + '}';
        }

        private int width;
private int height;
    }

    public static Bitmap decodeSampledBitmapFromPath(String pathName, int reqWidth, int reqHeight) {
// First decode with inJustDecodeBounds=true to check dimensions
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inJustDecodeBounds = true;
BitmapFactory.decodeFile(pathName, opts);
// Calculate inSampleSize
opts.inSampleSize = calculateInSampleSize(opts, reqWidth, reqHeight);
// Decode bitmap with inSampleSize set
opts.inJustDecodeBounds = false;
return BitmapFactory.decodeFile(pathName, opts);
    }

    public static Bitmap decodeSampledBitmapFromData(byte[] data, int reqWidth, int reqHeight) {
// First decode with inJustDecodeBounds=true to check dimensions
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inJustDecodeBounds = true;
BitmapFactory.decodeByteArray(data, 0, data.length, opts);
// Calculate inSampleSize
opts.inSampleSize = calculateInSampleSize(opts, reqWidth, reqHeight);
// Decode bitmap with inSampleSize set
opts.inJustDecodeBounds = false;
return BitmapFactory.decodeByteArray(data, 0, data.length, opts);
    }

    public static Bitmap decodeSampledBitmapFromResource(Resources res, int id, int reqWidth, int reqHeight) {
// First decode with inJustDecodeBounds=true to check dimensions
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inJustDecodeBounds = true;
BitmapFactory.decodeResource(res, id, opts);
// Calculate inSampleSize
opts.inSampleSize = calculateInSampleSize(opts, reqWidth, reqHeight);
// Decode bitmap with inSampleSize set
opts.inJustDecodeBounds = false;
return BitmapFactory.decodeResource(res, id, opts);
    }

    public static Bitmap decodeSampledBitmapFromInputStream(InputStream is, int reqWidth, int reqHeight) {
Bitmap bitmap = BitmapFactory.decodeStream(is);
if (bitmap == null)
return null;
// Log.d(TAG, "req size: " + reqWidth + ", " + reqHeight);
// Log.d(TAG, "bitmap size: " + bitmap.getWidth() + ", " + bitmap.getHeight());
if (bitmap.getWidth() > reqWidth || bitmap.getHeight() > reqHeight) {
Size srcSize = new Size(bitmap.getWidth(), bitmap.getHeight());
Size reqSize = new Size(reqWidth, reqHeight);
Size newSize = calculateNewSize(srcSize, reqSize);
bitmap = Bitmap.createScaledBitmap(bitmap, newSize.getWidth(), newSize.getHeight(), false);
}
return bitmap;
    }

    public static Size calculateNewSize(Size srcSize, Size reqSize) {
int newWidth;
int newHeight;
if (srcSize.getWidth() > srcSize.getHeight()) {
newWidth = reqSize.getWidth();
newHeight = newWidth * srcSize.getHeight() / srcSize.getWidth();
} else {
newHeight = reqSize.getHeight();
newWidth = newHeight * srcSize.getWidth() / srcSize.getHeight();
}
return new Size(newWidth, newHeight);
    }

    public static int calculateInSampleSize(BitmapFactory.Options options, int reqWidth, int reqHeight) {
// Raw height and width of image
final int height = options.outHeight;
final int width = options.outWidth;
int inSampleSize = 1;
if (height > reqHeight || width > reqWidth) {
final int halfHeight = height / 2;
final int halfWidth = width / 2;
// Calculate the largest inSampleSize value that is a power of 2 and
// keeps both height and width larger than the requested height and
// width.
while ((halfHeight / inSampleSize) > reqHeight && (halfWidth / inSampleSize) > reqWidth) {
inSampleSize *= 2;
}
}
Log.d(TAG, "inSampleSize=" + inSampleSize);
return inSampleSize;
}
}
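
A short usage sketch for the decoding helpers; the resource id, getResources(), imageView and target size are hypothetical placeholders.

// Hypothetical usage: decode a downsampled bitmap roughly sized for a 200x200 target.
Bitmap thumb = BitmapTools.decodeSampledBitmapFromResource(getResources(), R.drawable.sample, 200, 200);
imageView.setImageBitmap(thumb);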
