Swift-VideoToolbox H264 Hardware Encoding and Decoding

Encoding

import UIKit
import VideoToolbox

class ZGH264Encoder: NSObject {
    
    var videoEncodeCallback : ((Data)-> Void)?
    var videoEncodeCallbackSPSAndPPS :((Data,Data)->Void)?
    
    private var width: Int32 = 480
    private var height:Int32 = 640
    private var bitRate : Int32 = 480 * 640 * 3 * 4
    private var fps : Int32 = 10
    private var hasSpsPps = false
    private var frameID:Int64 = 0
    
    
    private var encodeCallBack:VTCompressionOutputCallback?
    private var encodeQueue = DispatchQueue(label: "encode")
    private var callBackQueue = DispatchQueue(label: "callBack")
    private var encodeSession:VTCompressionSession!
    
    init(width: Int32 = 480, height: Int32 = 640, bitRate: Int32? = nil, fps: Int32? = nil) {
        
        self.width = width
        self.height = height
        self.bitRate = bitRate ?? width * height * 3 * 4
        self.fps = fps ?? 10
        super.init()
        
        setCallBack()
        initVideoToolBox()
        
    }
    
    // Start encoding
    func encodeVideo(sampleBuffer:CMSampleBuffer){
        if self.encodeSession == nil {
            initVideoToolBox()
        }
        encodeQueue.async {
            guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            var time = CMTime(value: self.frameID, timescale: 1000)
            if #available(iOS 15, *) {
                time = CMTime(value: self.frameID, timescale: 100)
            }
            self.frameID += 1
            
            let state = VTCompressionSessionEncodeFrame(self.encodeSession, imageBuffer: imageBuffer, presentationTimeStamp: time, duration: .invalid, frameProperties: nil, sourceFrameRefcon: nil, infoFlagsOut: nil)
            if state != 0 {
                print("encode failure")
            }
        }
    }
    
    func stop(){
        hasSpsPps = false
        frameID = 0
        if encodeSession != nil {
            VTCompressionSessionCompleteFrames(encodeSession, untilPresentationTimeStamp: .invalid)
            VTCompressionSessionInvalidate(encodeSession)
            encodeSession = nil
        }
    }
    
    // Initialize the encoder
    private func initVideoToolBox() {
        // Create the VTCompressionSession
        let state = VTCompressionSessionCreate(allocator: kCFAllocatorDefault, width: width, height: height, codecType: kCMVideoCodecType_H264, encoderSpecification: nil, imageBufferAttributes: nil, compressedDataAllocator: nil, outputCallback:encodeCallBack , refcon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self), compressionSessionOut: &self.encodeSession)
        
        if state != 0{
            print("create VTCompressionSession failed")
            return
        }
        
        // Enable real-time encoding output
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        // Set the profile/level
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ProfileLevel, value: kVTProfileLevel_H264_Baseline_AutoLevel)
        // Disable B-frames (B-frames are not required for decoding and can be dropped)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AllowFrameReordering, value: kCFBooleanFalse)
        // Set the maximum keyframe (GOP) interval
        var frameInterval: Int32 = 10
        let number = CFNumberCreate(kCFAllocatorDefault, CFNumberType.intType, &frameInterval)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_MaxKeyFrameInterval, value: number)
        
        // Set the expected frame rate (a hint, not the actual frame rate)
        let fpscf = CFNumberCreate(kCFAllocatorDefault, CFNumberType.intType, &fps)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_ExpectedFrameRate, value: fpscf)
        
        // Set the average bit rate in bps. A higher bit rate gives a clearer image but a larger
        // stream; a lower one may look blurry but is still watchable.
        // Rule of thumb used here: width * height * 3 * 4 (≈ 3.7 Mbps for 480x640).
        let bitrateAverage = CFNumberCreate(kCFAllocatorDefault, CFNumberType.intType, &bitRate)
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_AverageBitRate, value: bitrateAverage)
        
        // Hard data rate limit: [bytes, seconds]
        let bitRatesLimit: CFArray = [bitRate * 5 / 8, 1] as CFArray
        VTSessionSetProperty(encodeSession, key: kVTCompressionPropertyKey_DataRateLimits, value: bitRatesLimit)
    }

    private func setCallBack()  {
        // Callback invoked when a frame has been encoded
        encodeCallBack = {(outputCallbackRefCon, sourceFrameRefCon, status, flag, sampleBuffer)  in
            let encoder :ZGH264Encoder = unsafeBitCast(outputCallbackRefCon, to: ZGH264Encoder.self)
            guard sampleBuffer != nil else {
                return
            }
            /// 0. Annex-B start code: 4 bytes 00 00 00 01
            let buffer: [UInt8] = [0x00, 0x00, 0x00, 0x01]
            /// 1. [UInt8] -> UnsafeBufferPointer<UInt8>
            let unsafeBufferPointer = buffer.withUnsafeBufferPointer { $0 }
            /// 2. UnsafeBufferPointer<UInt8> -> UnsafePointer<UInt8>
            let unsafePointer = unsafeBufferPointer.baseAddress
            guard let startCode = unsafePointer else { return }
            let attachArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer!, createIfNecessary: false)
            let strkey = unsafeBitCast(kCMSampleAttachmentKey_NotSync, to: UnsafeRawPointer.self)
            let cfDic = unsafeBitCast(CFArrayGetValueAtIndex(attachArray, 0), to: CFDictionary.self)
            let keyFrame = !CFDictionaryContainsKey(cfDic, strkey) // absence of NotSync means a sync (key) frame
            // Extract SPS and PPS from the keyframe's format description
            if keyFrame && !encoder.hasSpsPps{
                if let description = CMSampleBufferGetFormatDescription(sampleBuffer!){
                    
                    var spsSize: Int = 0, spsCount: Int = 0, spsHeaderLength: Int32 = 0
                    var ppsSize: Int = 0, ppsCount: Int = 0, ppsHeaderLength: Int32 = 0
                    var spsDataPointer: UnsafePointer<UInt8>?
                    var ppsDataPointer: UnsafePointer<UInt8>?
                    let spsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(description, parameterSetIndex: 0, parameterSetPointerOut: &spsDataPointer, parameterSetSizeOut: &spsSize, parameterSetCountOut: &spsCount, nalUnitHeaderLengthOut: &spsHeaderLength)
                    if spsStatus != 0 {
                        print("failed to get SPS")
                    }
                    
                    let ppsStatus = CMVideoFormatDescriptionGetH264ParameterSetAtIndex(description, parameterSetIndex: 1, parameterSetPointerOut: &ppsDataPointer, parameterSetSizeOut: &ppsSize, parameterSetCountOut: &ppsCount, nalUnitHeaderLengthOut: &ppsHeaderLength)
                    if ppsStatus != 0 {
                        print("failed to get PPS")
                    }
                    
                    if let spsData = spsDataPointer,let ppsData = ppsDataPointer{
                        var spsDataValue = Data(capacity: 4 + spsSize)
                        spsDataValue.append(buffer, count: 4)
                        spsDataValue.append(spsData, count: spsSize)
                        
                        var ppsDataValue = Data(capacity: 4 + ppsSize)
                        ppsDataValue.append(startCode, count: 4)
                        ppsDataValue.append(ppsData, count: ppsSize)
                        encoder.callBackQueue.async {
                            encoder.videoEncodeCallbackSPSAndPPS?(spsDataValue, ppsDataValue)
                        }
                    }
                }
                encoder.hasSpsPps = true
            }
            
            let dataBuffer = CMSampleBufferGetDataBuffer(sampleBuffer!)
            var dataPointer: UnsafeMutablePointer<Int8>?  = nil
            var totalLength :Int = 0
            let blockState = CMBlockBufferGetDataPointer(dataBuffer!, atOffset: 0, lengthAtOffsetOut: nil, totalLengthOut: &totalLength, dataPointerOut: &dataPointer)
            if blockState != 0 {
                print("failed to get data pointer \(blockState)")
            }
            
            // NALU handling
            var offset: UInt32 = 0
            // The first 4 bytes of each returned NALU are not the 00 00 00 01 start code but the
            // big-endian NALU length (AVCC format), so convert before forwarding.
            let lengthInfoSize = 4
            // Walk every NALU in the block buffer
            while offset < totalLength - lengthInfoSize {
                // Read the NALU length
                var naluDataLength: UInt32 = 0
                memcpy(&naluDataLength, dataPointer! + UnsafeMutablePointer<Int8>.Stride(offset), lengthInfoSize)
                // Convert big-endian to host byte order
                naluDataLength = CFSwapInt32BigToHost(naluDataLength)
                // Build the Annex-B NALU: start code + payload
                var data = Data(capacity: Int(naluDataLength) + lengthInfoSize)
                data.append(buffer, count: 4)
                // Convert pointer: UnsafeMutablePointer<Int8> -> UnsafePointer<UInt8>
                let naluUnsafePoint = unsafeBitCast(dataPointer, to: UnsafePointer<UInt8>.self)

                data.append(naluUnsafePoint + UnsafePointer<UInt8>.Stride(offset + UInt32(lengthInfoSize)), count: Int(naluDataLength))
                
                encoder.callBackQueue.async {
                    encoder.videoEncodeCallback?(data)
                }
                offset += (naluDataLength + UInt32(lengthInfoSize))
            }
        }
    }
    
    
    deinit {
        stop()
    }
}
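
encodeVideo(sampleBuffer:) expects raw CMSampleBuffers such as those delivered by an AVCaptureVideoDataOutput. The sketch below shows one way to feed it; CaptureFeeder is a hypothetical helper (not part of the original class), and details such as the session preset are assumptions.

import AVFoundation

// Hypothetical helper: pushes camera frames into ZGH264Encoder.
class CaptureFeeder: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    let session = AVCaptureSession()
    private let encoder: ZGH264Encoder
    private let output = AVCaptureVideoDataOutput()
    private let sampleQueue = DispatchQueue(label: "capture.sample")
    
    init(encoder: ZGH264Encoder) {
        self.encoder = encoder
        super.init()
        
        session.sessionPreset = .vga640x480 // assumed; keep it consistent with the encoder's width/height
        if let camera = AVCaptureDevice.default(for: .video),
           let input = try? AVCaptureDeviceInput(device: camera),
           session.canAddInput(input) {
            session.addInput(input)
        }
        output.setSampleBufferDelegate(self, queue: sampleQueue)
        if session.canAddOutput(output) {
            session.addOutput(output)
        }
    }
    
    // Every captured frame is handed straight to the hardware encoder.
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        encoder.encodeVideo(sampleBuffer: sampleBuffer)
    }
}

Calling session.startRunning() (preferably off the main thread) then starts frame delivery.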

Decode Output Type

If you want to play the output with AVSampleBufferDisplayLayer, choose the sampleBuffer output type. In that case the CMSampleBuffer is not actually decoded: AVSampleBufferDisplayLayer can enqueue and display H264 CMSampleBuffers directly.

enum ZGH264DecodeType {
    case imageBuffer  // CVPixelBuffer
    case sampleBuffer // CMSampleBuffer (H264)
}
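
In the usage example at the end, sample buffers from this path are enqueued into a preview view (otherPreView), which the post does not show. A minimal sketch of what such a view could look like, backed by AVSampleBufferDisplayLayer, follows; the name SampleBufferPreview and its enqueue(sampleBuffer:) method are assumptions.

import UIKit
import AVFoundation

// Hypothetical preview view standing in for the otherPreView used later.
class SampleBufferPreview: UIView {
    override class var layerClass: AnyClass {
        return AVSampleBufferDisplayLayer.self
    }
    
    private var displayLayer: AVSampleBufferDisplayLayer {
        return layer as! AVSampleBufferDisplayLayer
    }
    
    func enqueue(sampleBuffer: CMSampleBuffer) {
        // H264 CMSampleBuffers can be displayed without a separate decode step.
        if displayLayer.isReadyForMoreMediaData {
            displayLayer.enqueue(sampleBuffer)
        }
    }
}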

Decoding

Decoding requires the SPS and PPS produced by the encoder.

import UIKit
import VideoToolbox

class ZGH264Decoder: NSObject {
    
    var videoDecodeCallback:((CVImageBuffer?) -> Void)?
    var videoDecodeSampleBufferCallback:((CMSampleBuffer?) -> Void)?
    
    private var width: Int32 = 480
    private var height:Int32 = 640
    
    private var spsData:Data?
    private var ppsData:Data?
    private var decompressionSession : VTDecompressionSession?
    private var decodeDesc : CMVideoFormatDescription?
    private var callback :VTDecompressionOutputCallback?
    private var decodeQueue = DispatchQueue(label: "decode")
    private var callBackQueue = DispatchQueue(label: "decodeCallBack")
    
    private var returnType: ZGH264DecodeType = .sampleBuffer
    
    init(width: Int32, height: Int32) {
        self.width = width
        self.height = height
        super.init()
    }
    
    func decode(data:Data) {
        decodeQueue.async {
            let length:UInt32 =  UInt32(data.count)
            self.decodeByte(data: data, size: length)
        }
    }
    
    /// Set the decode output type
    /// - Parameter type: ZGH264DecodeType
    func setReturnType(type: ZGH264DecodeType){
        returnType = type
    }
    
    private func decodeByte(data: Data, size: UInt32) {
        // The first 4 bytes of the incoming frame are the NALU start code 00 00 00 01.
        // Replace the start code with a 4-byte big-endian NALU length (the AVCC layout VideoToolbox expects).
        let naluSize = size - 4
        let length: [UInt8] = [
            UInt8(truncatingIfNeeded: naluSize >> 24),
            UInt8(truncatingIfNeeded: naluSize >> 16),
            UInt8(truncatingIfNeeded: naluSize >> 8),
            UInt8(truncatingIfNeeded: naluSize)
            ]
        var frameByte: [UInt8] = length
        [UInt8](data).suffix(from: 4).forEach { (bb) in
            frameByte.append(bb)
        }
        let bytes = frameByte
        // The low 5 bits of the 5th byte give the NALU type: 7 = SPS, 8 = PPS, 5 = IDR (I-frame)
        let type: Int = Int(bytes[4] & 0x1f)
        switch type{
        case 0x05:
            if initDecoder() {
                decode(frame: bytes, size: size)
            }
            
        case 0x06:
            // SEI (supplemental enhancement information): ignored
            break
        case 0x07:
            spsData = data
        case 0x08:
            ppsData = data
        default:
            if initDecoder() {
                decode(frame: bytes, size: size)
            }
        }
    }
    
    private func decode(frame: [UInt8], size: UInt32) {
        var blockBUffer: CMBlockBuffer?
        var frame1 = frame
        // Create a CMBlockBuffer wrapping the frame bytes
        /*!
         param 1: structureAllocator  kCFAllocatorDefault
         param 2: memoryBlock         frame
         param 3: blockLength         frame size
         param 4: blockAllocator      kCFAllocatorNull (the block buffer does not own the memory)
         param 5: customBlockSource   pass NULL
         param 6: offsetToData        data offset
         param 7: dataLength          data length
         param 8: flags               feature and control flags
         param 9: blockBufferOut      receives the block buffer, must not be NULL
         */
        let blockState = CMBlockBufferCreateWithMemoryBlock(allocator: kCFAllocatorDefault,
                                           memoryBlock: &frame1,
                                           blockLength: Int(size),
                                           blockAllocator: kCFAllocatorNull,
                                           customBlockSource: nil,
                                           offsetToData: 0,
                                           dataLength: Int(size),
                                           flags: 0,
                                           blockBufferOut: &blockBUffer)
        if blockState != 0 {
            print("failed to create CMBlockBuffer")
        }
        var sampleSizeArray :[Int] = [Int(size)]
        var sampleBuffer :CMSampleBuffer?
        // Create a CMSampleBuffer
        /*
         param 1: allocator               kCFAllocatorDefault
         param 2: dataBuffer              the block buffer created above, must not be NULL
         param 3: formatDescription       video format description (built from SPS/PPS)
         param 4: sampleCount             number of samples in the CMSampleBuffer
         param 5: sampleTimingEntryCount  must be 0, 1, or numSamples
         param 6: sampleTimingArray       timing array, nil here
         param 7: sampleSizeEntryCount    1 here
         param 8: sampleSizeArray         sizes of the samples
         param 9: sampleBufferOut         receives the sample buffer
         */
        let readyState = CMSampleBufferCreateReady(allocator: kCFAllocatorDefault,
                                  dataBuffer: blockBUffer,
                                  formatDescription: decodeDesc,
                                  sampleCount: CMItemCount(1),
                                  sampleTimingEntryCount: CMItemCount(),
                                  sampleTimingArray: nil,
                                  sampleSizeEntryCount: CMItemCount(1),
                                  sampleSizeArray: &sampleSizeArray,
                                  sampleBufferOut: &sampleBuffer)
        
        guard let buffer = sampleBuffer, readyState == kCMBlockBufferNoErr else {
            print("failed to create CMSampleBuffer")
            return
        }
        
        if returnType == .sampleBuffer {
            if let attachmentArray = CMSampleBufferGetSampleAttachmentsArray(buffer, createIfNecessary: true) {
                let dic = unsafeBitCast(CFArrayGetValueAtIndex(attachmentArray, 0), to: CFMutableDictionary.self)
                CFDictionarySetValue(dic,
                                     Unmanaged.passUnretained(kCMSampleAttachmentKey_DisplayImmediately).toOpaque(),
                                     Unmanaged.passUnretained(kCFBooleanTrue).toOpaque())
            }
            videoDecodeSampleBufferCallback?(sampleBuffer)
            return
        }
    
        // Decode into a CVPixelBuffer
        /*
         param 1: the decompression session
         param 2: source CMSampleBuffer containing one or more video frames
         param 3: decode flags
         param 4: frameRefcon passed through to the output callback
         param 5: infoFlagsOut, reports synchronous/asynchronous decoding
         */
        let sourceFrame: UnsafeMutableRawPointer? = nil
        var infoFlag = VTDecodeInfoFlags.asynchronous
        let decodeState = VTDecompressionSessionDecodeFrame(self.decompressionSession!, sampleBuffer: buffer, flags: VTDecodeFrameFlags._EnableAsynchronousDecompression, frameRefcon: sourceFrame, infoFlagsOut: &infoFlag)
        if decodeState != 0 {
            print("decode failed")
        }
        
        
    }
    
    private func initDecoder() -> Bool {
        
        if decompressionSession != nil {
            return true
        }
        guard spsData != nil,ppsData != nil else {
            return false
        }

        // Strip the 4-byte start code from SPS and PPS
        var sps : [UInt8] = []
        [UInt8](spsData!).suffix(from: 4).forEach { (value) in
            sps.append(value)
        }
        var pps : [UInt8] = []
        [UInt8](ppsData!).suffix(from: 4).forEach{(value) in
            pps.append(value)
        }
        
        let spsAndpps = [sps.withUnsafeBufferPointer{$0}.baseAddress!,pps.withUnsafeBufferPointer{$0}.baseAddress!]
        let sizes = [sps.count,pps.count]

        /**
        Create the format description from the SPS and PPS
        param kCFAllocatorDefault      allocator
        param 2                        number of parameter sets
        param parameterSetPointers     pointers to the parameter sets
        param parameterSetSizes        sizes of the parameter sets
        param nalUnitHeaderLength      length of the NALU length field, 4
        param formatDescriptionOut     receives the format description (decodeDesc)
        return status
        */
        let descriptionState = CMVideoFormatDescriptionCreateFromH264ParameterSets(allocator: kCFAllocatorDefault, parameterSetCount: 2, parameterSetPointers: spsAndpps, parameterSetSizes: sizes, nalUnitHeaderLength: 4, formatDescriptionOut: &decodeDesc)
        if descriptionState != 0 {
            print("failed to create CMVideoFormatDescription")
            return false
        }
        // Set up the decode callback
        /*
         VTDecompressionOutputCallbackRecord is a simple struct with a pointer (decompressionOutputCallback) to the function called once a frame has been decompressed, plus the instance (decompressionOutputRefCon) on which that callback can be found.
         */
        setCallBack()
        var callbackRecord = VTDecompressionOutputCallbackRecord(decompressionOutputCallback: callback, decompressionOutputRefCon: unsafeBitCast(self, to: UnsafeMutableRawPointer.self))
        /*
         Decode parameters:
         * kCVPixelBufferPixelFormatTypeKey: output pixel format. Values verified to work:
            kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange (420v)
            kCVPixelFormatType_420YpCbCr8BiPlanarFullRange (420f)
            kCVPixelFormatType_32BGRA (iOS converts YUV to BGRA internally)
           YUV420 is usually used for SD video and YUV422 for HD, so this restriction is a little surprising; on the other hand, under the same conditions YUV420 is cheaper to compute and transmit than YUV422.
         
         * kCVPixelBufferWidthKey / kCVPixelBufferHeightKey: source resolution, width * height
         * kCVPixelBufferOpenGLCompatibilityKey: allows decoded images to be drawn directly in an OpenGL context instead of being copied between the bus and the CPU. This is sometimes called a zero-copy path, because no decoded image is copied while drawing.
         
         */
        let imageBufferAttributes = [
            kCVPixelBufferPixelFormatTypeKey:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
            kCVPixelBufferWidthKey:width,
            kCVPixelBufferHeightKey:height,
//            kCVPixelBufferOpenGLCompatibilityKey:true
            ] as [CFString : Any]
        
        // Create the decompression session
        
        /*!
         @function    VTDecompressionSessionCreate
         @abstract    Creates a session for decompressing video frames.
         @discussion  Decompressed frames are emitted through the output callback.
         @param    allocator                  memory allocator; kCFAllocatorDefault here
         @param    videoFormatDescription     describes the source video frames
         @param    videoDecoderSpecification  a specific decoder that must be used; NULL here
         @param    destinationImageBufferAttributes  requirements for the output pixel buffers
         @param    outputCallback             callback invoked with the decompressed frames
         @param    decompressionSessionOut    receives the new decompression session
         */
        let state = VTDecompressionSessionCreate(allocator: kCFAllocatorDefault, formatDescription: decodeDesc!, decoderSpecification: nil, imageBufferAttributes: imageBufferAttributes as CFDictionary, outputCallback: &callbackRecord, decompressionSessionOut: &decompressionSession)
        if state != 0 {
            print("failed to create VTDecompressionSession")
            return false
        }
        VTSessionSetProperty(self.decompressionSession!, key: kVTDecompressionPropertyKey_RealTime, value: kCFBooleanTrue)
        
        return true
        
    }

    // Callback invoked when a frame has been decoded
    private func setCallBack() {
        /*
         The VTDecompressionOutputCallback takes seven parameters:
                param 1: the refcon supplied when the session was created
                param 2: the per-frame refcon
                param 3: a status code (may contain undefined codes)
                param 4: flags indicating synchronous/asynchronous decoding, or whether the decoder dropped the frame
                param 5: the decoded image buffer
                param 6: presentation timestamp
                param 7: presentation duration
         */
        //(UnsafeMutableRawPointer?, UnsafeMutableRawPointer?, OSStatus, VTDecodeInfoFlags, CVImageBuffer?, CMTime, CMTime) -> Void
        callback = { decompressionOutputRefCon,sourceFrameRefCon,status,inforFlags,imageBuffer,presentationTimeStamp,presentationDuration in
            let decoder :ZGH264Decoder = unsafeBitCast(decompressionOutputRefCon, to: ZGH264Decoder.self)
            
            guard imageBuffer != nil else {
                return
            }
            if let block = decoder.videoDecodeCallback  {
                decoder.callBackQueue.async {
                    block(imageBuffer)
                }
            }
        }
    }
    
    deinit {
        if decompressionSession != nil {
            VTDecompressionSessionInvalidate(decompressionSession!)
            decompressionSession = nil
        }
        
    }
}
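
When setReturnType(type: .imageBuffer) is used, the decoder hands back CVImageBuffers instead of sample buffers. A minimal sketch for turning one into a UIImage is shown below; the helper makeUIImage(from:) is an assumption added here, not part of the original post.

import UIKit
import CoreImage

// Reuse a single CIContext rather than creating one per frame.
let ciContext = CIContext()

// Hypothetical helper: render a decoded CVImageBuffer as a UIImage.
func makeUIImage(from imageBuffer: CVImageBuffer) -> UIImage? {
    let ciImage = CIImage(cvImageBuffer: imageBuffer)
    guard let cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}

The result can then be assigned to a UIImageView on the main queue inside videoDecodeCallback.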

Usage Example

let h264Encoder = ZGH264Encoder(width: 360, height: 640, bitRate: 2*1024*1024)
let h264Decoder = ZGH264Decoder(width: 360, height: 640)

h264Encoder.videoEncodeCallback = { [weak self] (data) in
    self?.h264Decoder.decode(data: data)
}
h264Encoder.videoEncodeCallbackSPSAndPPS = { [weak self] (sps, pps) in
    self?.h264Decoder.decode(data: sps)
    self?.h264Decoder.decode(data: pps)
}

h264Decoder.videoDecodeSampleBufferCallback = { [weak self] sampleBuffer in
    guard let sampleBuffer = sampleBuffer else { return }
    self?.otherPreView.enqueue(sampleBuffer: sampleBuffer)
}

//h264Decoder.setReturnType(type: .imageBuffer)
//h264Decoder.videoDecodeCallback = { pixelBuffer in
//
//}
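
Putting it together with the hypothetical CaptureFeeder sketched after the encoder (an assumption, not shown in the original post), the camera drives the whole loop-back pipeline:

let feeder = CaptureFeeder(encoder: h264Encoder)
// Start capture off the main thread; frames then flow
// camera -> ZGH264Encoder -> ZGH264Decoder -> otherPreView.
DispatchQueue.global().async {
    feeder.session.startRunning()
}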

Related Material
VideoEncodeH264
