加載超清大圖是會引起內(nèi)存爆表的問題,最近一直困擾著我择懂。
SDWebImage在加載大圖時做的不是很好侦厚,加載大圖內(nèi)存爆表。YYWebImage會好一點括眠,但還是不行。
當(dāng)不要求圖片質(zhì)量的情況下倍权,最好是在上傳圖片的時候就壓縮圖片掷豺,如果顯示的時候再壓縮的話也會導(dǎo)致內(nèi)存暴漲,壓縮也是很占內(nèi)存的薄声。
而對于UIImageJPEGRepresentation壓縮方法当船,不能降低顯示內(nèi)存。因為顯示圖片所占的內(nèi)存大小只與圖片的分辨率有關(guān)默辨,與圖片的大小無關(guān)德频。UIImageJPEGRepresentation壓縮方法只能降低圖片大小,分辨率不變廓奕。
壓縮圖片的兩張方式看我寫的這篇http://www.reibang.com/p/8150a8e7c0e4
后來找到了蘋果的一個官方建議加載大圖的demo
https://developer.apple.com/library/ios/samplecode/LargeImageDownsizing/
我改裝之后的代碼抱婉,可以直接用:
#import "LargeImageDispose.h"
#define IPAD1_IPHONE3GS
#ifdef IPAD1_IPHONE3GS
# define kDestImageSizeMB 60.0f // The resulting image will be (x)MB of uncompressed image data.
# define kSourceImageTileSizeMB 20.0f // The tile size will be (x)MB of uncompressed image data.
#endif
/* These constants are suggested initial values for iPad2, and iPhone 4 */
//#define IPAD2_IPHONE4
#ifdef IPAD2_IPHONE4
# define kDestImageSizeMB 120.0f // The resulting image will be (x)MB of uncompressed image data.
# define kSourceImageTileSizeMB 40.0f // The tile size will be (x)MB of uncompressed image data.
#endif
/* These constants are suggested initial values for iPhone3G, iPod2 and earlier devices */
//#define IPHONE3G_IPOD2_AND_EARLIER
#ifdef IPHONE3G_IPOD2_AND_EARLIER
# define kDestImageSizeMB 30.0f // The resulting image will be (x)MB of uncompressed image data.
# define kSourceImageTileSizeMB 10.0f // The tile size will be (x)MB of uncompressed image data.
#endif
#define bytesPerMB 1048576.0f
#define bytesPerPixel 4.0f
#define pixelsPerMB ( bytesPerMB / bytesPerPixel )
#define destTotalPixels kDestImageSizeMB * pixelsPerMB
#define tileTotalPixels kSourceImageTileSizeMB * pixelsPerMB
#define destSeemOverlap 2.0f
@interface LargeImageDispose ()
{
CGContextRef destContext;
}
@property (strong, nonatomic) UIImage *destImage;
@end
@implementation LargeImageDispose
-(UIImage *)downsizeLargeImage:(UIImage *)image {
// create an image from the image filename constant. Note this
// doesn't actually read any pixel information from disk, as that
// is actually done at draw time.
UIImage *sourceImage = image;
if( sourceImage == nil ) NSLog(@"input image not found!");
// get the width and height of the input image using
// core graphics image helper functions.
CGSize sourceResolution;
sourceResolution.width = CGImageGetWidth(sourceImage.CGImage);
sourceResolution.height = CGImageGetHeight(sourceImage.CGImage);
// use the width and height to calculate the total number of pixels
// in the input image.
float sourceTotalPixels = sourceResolution.width * sourceResolution.height;
// calculate the number of MB that would be required to store
// this image uncompressed in memory.
float sourceTotalMB = sourceTotalPixels / pixelsPerMB;
NSLog(@"%.2f",sourceTotalMB);
// determine the scale ratio to apply to the input image
// that results in an output image of the defined size.
// see kDestImageSizeMB, and how it relates to destTotalPixels.
float imageScale = destTotalPixels / sourceTotalPixels;
NSLog(@"%.2f",destTotalPixels);
// use the image scale to calcualte the output image width, height
CGSize destResolution;
destResolution.width = (int)( sourceResolution.width * imageScale );
destResolution.height = (int)( sourceResolution.height * imageScale );
// create an offscreen bitmap context that will hold the output image
// pixel data, as it becomes available by the downscaling routine.
// use the RGB colorspace as this is the colorspace iOS GPU is optimized for.
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
int bytesPerRow = bytesPerPixel * destResolution.width;
// allocate enough pixel data to hold the output image.
void* destBitmapData = malloc( bytesPerRow * destResolution.height );
if( destBitmapData == NULL ) NSLog(@"failed to allocate space for the output image!");
// create the output bitmap context
destContext = CGBitmapContextCreate( destBitmapData, destResolution.width, destResolution.height, 8, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast );
// self.destContext = destContext;
// remember CFTypes assign/check for NULL. NSObjects assign/check for nil.
if( destContext == NULL ) {
free( destBitmapData );
NSLog(@"failed to create the output bitmap context!");
}
// release the color space object as its job is done
CGColorSpaceRelease( colorSpace );
// flip the output graphics context so that it aligns with the
// cocoa style orientation of the input document. this is needed
// because we used cocoa's UIImage -imageNamed to open the input file.
CGContextTranslateCTM( destContext, 0.0f, destResolution.height );
CGContextScaleCTM( destContext, 1.0f, -1.0f );
// now define the size of the rectangle to be used for the
// incremental blits from the input image to the output image.
// we use a source tile width equal to the width of the source
// image due to the way that iOS retrieves image data from disk.
// iOS must decode an image from disk in full width 'bands', even
// if current graphics context is clipped to a subrect within that
// band. Therefore we fully utilize all of the pixel data that results
// from a decoding opertion by achnoring our tile size to the full
// width of the input image.
CGRect sourceTile;
sourceTile.size.width = sourceResolution.width;
// the source tile height is dynamic. Since we specified the size
// of the source tile in MB, see how many rows of pixels high it
// can be given the input image width.
sourceTile.size.height = (int)( tileTotalPixels / sourceTile.size.width );
NSLog(@"source tile size: %f x %f",sourceTile.size.width, sourceTile.size.height);
sourceTile.origin.x = 0.0f;
// the output tile is the same proportions as the input tile, but
// scaled to image scale.
CGRect destTile;
destTile.size.width = destResolution.width;
destTile.size.height = sourceTile.size.height * imageScale;
destTile.origin.x = 0.0f;
NSLog(@"dest tile size: %f x %f",destTile.size.width, destTile.size.height);
// the source seem overlap is proportionate to the destination seem overlap.
// this is the amount of pixels to overlap each tile as we assemble the ouput image.
float sourceSeemOverlap = (int)( ( destSeemOverlap / destResolution.height ) * sourceResolution.height );
NSLog(@"dest seem overlap: %f, source seem overlap: %f",destSeemOverlap, sourceSeemOverlap);
CGImageRef sourceTileImageRef;
// calculate the number of read/write opertions required to assemble the
// output image.
int iterations = (int)( sourceResolution.height / sourceTile.size.height );
// if tile height doesn't divide the image height evenly, add another iteration
// to account for the remaining pixels.
int remainder = (int)sourceResolution.height % (int)sourceTile.size.height;
if( remainder ) iterations++;
// add seem overlaps to the tiles, but save the original tile height for y coordinate calculations.
float sourceTileHeightMinusOverlap = sourceTile.size.height;
sourceTile.size.height += sourceSeemOverlap;
destTile.size.height += destSeemOverlap;
NSLog(@"beginning downsize. iterations: %d, tile height: %f, remainder height: %d", iterations, sourceTile.size.height,remainder );
for( int y = 0; y < iterations; ++y ) {
NSLog(@"iteration %d of %d",y+1,iterations);
sourceTile.origin.y = y * sourceTileHeightMinusOverlap + sourceSeemOverlap;
destTile.origin.y = ( destResolution.height ) - ( ( y + 1 ) * sourceTileHeightMinusOverlap * imageScale + destSeemOverlap );
// create a reference to the source image with its context clipped to the argument rect.
sourceTileImageRef = CGImageCreateWithImageInRect( sourceImage.CGImage, sourceTile );
// if this is the last tile, it's size may be smaller than the source tile height.
// adjust the dest tile size to account for that difference.
if( y == iterations - 1 && remainder ) {
float dify = destTile.size.height;
destTile.size.height = CGImageGetHeight( sourceTileImageRef ) * imageScale;
dify -= destTile.size.height;
destTile.origin.y += dify;
}
// read and write a tile sized portion of pixels from the input image to the output image.
CGContextDrawImage( destContext, destTile, sourceTileImageRef );
/* release the source tile portion pixel data. note,
releasing the sourceTileImageRef doesn't actually release the tile portion pixel
data that we just drew, but the call afterward does. */
CGImageRelease( sourceTileImageRef );
/* while CGImageCreateWithImageInRect lazily loads just the image data defined by the argument rect,
that data is finally decoded from disk to mem when CGContextDrawImage is called. sourceTileImageRef
maintains internally a reference to the original image, and that original image both, houses and
caches that portion of decoded mem. Thus the following call to release the source image. */
// we reallocate the source image after the pool is drained since UIImage -imageNamed
// returns us an autoreleased object.
if( y < iterations - 1 ) {
sourceImage = image;
[self performSelectorOnMainThread:@selector(createImageFromContext) withObject:nil waitUntilDone:YES];
}
}
NSLog(@"downsize complete.");
// [self performSelectorOnMainThread:@selector(initializeScrollView:) withObject:nil waitUntilDone:YES];
// free the context since its job is done. destImageRef retains the pixel data now.
CGContextRelease( destContext );
return self.destImage;
}
-(void)createImageFromContext {
// create a CGImage from the offscreen image context
CGImageRef destImageRef = CGBitmapContextCreateImage( destContext );
if( destImageRef == NULL ) NSLog(@"destImageRef is null.");
// wrap a UIImage around the CGImage
self.destImage = [UIImage imageWithCGImage:destImageRef scale:1.0f orientation:UIImageOrientationDownMirrored];
// release ownership of the CGImage, since destImage retains ownership of the object now.
CGImageRelease( destImageRef );
if( self.destImage == nil ) NSLog(@"destImage is nil.");
}
@end
這段代碼只是改變了圖片的渲染方式档叔,利用GPU 進行渲染桌粉,有效降低內(nèi)存,不改變圖片的質(zhì)量衙四,親測加載24M圖片so easy铃肯,只占20多內(nèi)存
不能用于加載太小的圖片,只能用于加載大圖(大概1M以上)
最近看了這篇文章http://www.reibang.com/p/1c9de8dea3ea
導(dǎo)致加載大圖內(nèi)存暴漲的原因是對大圖的解壓縮加載传蹈。
在SDWebImage中大圖禁用解壓縮押逼,可以防止內(nèi)存暴漲:
[[SDImageCache sharedImageCache] setShouldDecompressImages:NO];
[[SDWebImageDownloader sharedDownloader] setShouldDecompressImages:NO];
測試只有iPhone7步藕,7p,SE,有用,其他機型不起作用挑格,和系統(tǒng)版本沒有關(guān)系