device query

~/samples/NVIDIA_CUDA-9.1_Samples/1_Utilities/deviceQuery$ make

/usr/local/cuda-9.1/bin/nvcc -ccbin g++ -I../../common/inc -m64 -gencode arch=compute_30,code=sm_30 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_70,code=compute_70 -o deviceQuery.o -c deviceQuery.cpp

/usr/local/cuda-9.1/bin/nvcc -ccbin g++ -m64 -gencode arch=compute_30,code=sm_30 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_70,code=compute_70 -o deviceQuery deviceQuery.o

mkdir -p ../../bin/x86_64/linux/release

cp deviceQuery ../../bin/x86_64/linux/release

~/samples/NVIDIA_CUDA-9.1_Samples/1_Utilities/deviceQuery$ ls

deviceQuery  deviceQuery.cpp  deviceQuery.o  Makefile  NsightEclipse.xml  readme.txt

~/samples/NVIDIA_CUDA-9.1_Samples/1_Utilities/deviceQuery$ ./deviceQuery

./deviceQuery Starting...

CUDA Device Query (Runtime API) version (CUDART static linking)

Detected 1 CUDA Capable device(s)

Device 0: "GeForce GTX 1050"

  CUDA Driver Version / Runtime Version          10.1 / 9.1

  CUDA Capability Major/Minor version number:    6.1

  Total amount of global memory:                 1998 MBytes (2095382528 bytes)

  ( 5) Multiprocessors, (128) CUDA Cores/MP:     640 CUDA Cores

  GPU Max Clock rate:                            1455 MHz (1.46 GHz)

  Memory Clock rate:                             3504 Mhz

  Memory Bus Width:                              128-bit

  L2 Cache Size:                                 1048576 bytes

  Maximum Texture Dimension Size (x,y,z)         1D=(131072), 2D=(131072, 65536), 3D=(16384, 16384, 16384)

  Maximum Layered 1D Texture Size, (num) layers  1D=(32768), 2048 layers

  Maximum Layered 2D Texture Size, (num) layers  2D=(32768, 32768), 2048 layers

  Total amount of constant memory:               65536 bytes

  Total amount of shared memory per block:       49152 bytes

  Total number of registers available per block: 65536

  Warp size:                                     32

  Maximum number of threads per multiprocessor:  2048

  Maximum number of threads per block:           1024

  Max dimension size of a thread block (x,y,z): (1024, 1024, 64)

  Max dimension size of a grid size    (x,y,z): (2147483647, 65535, 65535)

  Maximum memory pitch:                          2147483647 bytes

  Texture alignment:                             512 bytes

  Concurrent copy and kernel execution:          Yes with 2 copy engine(s)

  Run time limit on kernels:                     No

  Integrated GPU sharing Host Memory:            No

  Support host page-locked memory mapping:       Yes

  Alignment requirement for Surfaces:            Yes

  Device has ECC support:                        Disabled

  Device supports Unified Addressing (UVA):      Yes

  Supports Cooperative Kernel Launch:            Yes

  Supports MultiDevice Co-op Kernel Launch:      Yes

  Device PCI Domain ID / Bus ID / location ID:   0 / 2 / 0

  Compute Mode:

    < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >

deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 10.1, CUDA Runtime Version = 9.1, NumDevs = 1

Result = PASS




~/samples/NVIDIA_CUDA-9.1_Samples/1_Utilities/deviceQuery$ cat deviceQuery.cpp

/*

* Copyright 1993-2015 NVIDIA Corporation.? All rights reserved.

*

* Please refer to the NVIDIA end user license agreement (EULA) associated

* with this source code for terms and conditions that govern your use of

* this software. Any use, reproduction, disclosure, or distribution of

* this software and related documentation outside the terms of the EULA

* is strictly prohibited.

*

*/

/* This sample queries the properties of the CUDA devices present in the system via CUDA Runtime API. */

// Shared Utilities (QA Testing)

// std::system includes

#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>

#include <cuda_runtime.h>

#include <helper_cuda.h>

int *pArgc = NULL;

char **pArgv = NULL;

#if CUDART_VERSION < 5000

// CUDA-C includes

#include <cuda.h>

// This function wraps the CUDA Driver API into a template function

template <class T>

inline void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute, int device)

{

? ? CUresult error =? ? cuDeviceGetAttribute(attribute, device_attribute, device);

? ? if (CUDA_SUCCESS != error)

? ? {

? ? ? ? fprintf(stderr, "cuSafeCallNoSync() Driver API error = %04d from file <%s>, line %i.\n",

? ? ? ? ? ? ? ? error, __FILE__, __LINE__);

? ? ? ? exit(EXIT_FAILURE);

? ? }

}

#endif /* CUDART_VERSION < 5000 */

////////////////////////////////////////////////////////////////////////////////

// Program main

////////////////////////////////////////////////////////////////////////////////

int

main(int argc, char **argv)

{

? ? pArgc = &argc;

? ? pArgv = argv;

? ? printf("%s Starting...\n\n", argv[0]);

? ? printf(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");

? ? int deviceCount = 0;

? ? cudaError_t error_id = cudaGetDeviceCount(&deviceCount);

? ? if (error_id != cudaSuccess)

? ? {

? ? ? ? printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));

? ? ? ? printf("Result = FAIL\n");

? ? ? ? exit(EXIT_FAILURE);

? ? }

? ? // This function call returns 0 if there are no CUDA capable devices.

? ? if (deviceCount == 0)

? ? {

? ? ? ? printf("There are no available device(s) that support CUDA\n");

? ? }

? ? else

? ? {

? ? ? ? printf("Detected %d CUDA Capable device(s)\n", deviceCount);

? ? }

? ? int dev, driverVersion = 0, runtimeVersion = 0;

? ? for (dev = 0; dev < deviceCount; ++dev)

? ? {

? ? ? ? cudaSetDevice(dev);

? ? ? ? cudaDeviceProp deviceProp;

? ? ? ? cudaGetDeviceProperties(&deviceProp, dev);

? ? ? ? printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);

? ? ? ? // Console log

? ? ? ? cudaDriverGetVersion(&driverVersion);

? ? ? ? cudaRuntimeGetVersion(&runtimeVersion);

? ? ? ? printf("? CUDA Driver Version / Runtime Version? ? ? ? ? %d.%d / %d.%d\n", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10);

? ? ? ? printf("? CUDA Capability Major/Minor version number:? ? %d.%d\n", deviceProp.major, deviceProp.minor);

? ? ? ? char msg[256];

? ? ? ? SPRINTF(msg, "? Total amount of global memory:? ? ? ? ? ? ? ? %.0f MBytes (%llu bytes)\n",

? ? ? ? ? ? ? ? (float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);

? ? ? ? printf("%s", msg);

? ? ? ? printf("? (%2d) Multiprocessors, (%3d) CUDA Cores/MP:? ? %d CUDA Cores\n",

? ? ? ? ? ? ? deviceProp.multiProcessorCount,

? ? ? ? ? ? ? _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),

? ? ? ? ? ? ? _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);

? ? ? ? printf("? GPU Max Clock rate:? ? ? ? ? ? ? ? ? ? ? ? ? ? %.0f MHz (%0.2f GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);

#if CUDART_VERSION >= 5000

? ? ? ? // This is supported in CUDA 5.0 (runtime API device properties)

? ? ? ? printf("? Memory Clock rate:? ? ? ? ? ? ? ? ? ? ? ? ? ? %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f);

? ? ? ? printf("? Memory Bus Width:? ? ? ? ? ? ? ? ? ? ? ? ? ? ? %d-bit\n",? deviceProp.memoryBusWidth);

? ? ? ? if (deviceProp.l2CacheSize)

? ? ? ? {

? ? ? ? ? ? printf("? L2 Cache Size:? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? %d bytes\n", deviceProp.l2CacheSize);

? ? ? ? }

#else

? ? ? ? // This only available in CUDA 4.0-4.2 (but these were only exposed in the CUDA Driver API)

? ? ? ? int memoryClock;

? ? ? ? getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, dev);

? ? ? ? printf("? Memory Clock rate:? ? ? ? ? ? ? ? ? ? ? ? ? ? %.0f Mhz\n", memoryClock * 1e-3f);

? ? ? ? int memBusWidth;

? ? ? ? getCudaAttribute<int>(&memBusWidth, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);

? ? ? ? printf("? Memory Bus Width:? ? ? ? ? ? ? ? ? ? ? ? ? ? ? %d-bit\n", memBusWidth);

? ? ? ? int L2CacheSize;

? ? ? ? getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);

? ? ? ? if (L2CacheSize)

? ? ? ? {

? ? ? ? ? ? printf("? L2 Cache Size:? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? %d bytes\n", L2CacheSize);

? ? ? ? }

#endif

? ? ? ? printf("? Maximum Texture Dimension Size (x,y,z)? ? ? ? 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",

? ? ? ? ? ? ? deviceProp.maxTexture1D? , deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],

? ? ? ? ? ? ? deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);

? ? ? ? printf("? Maximum Layered 1D Texture Size, (num) layers? 1D=(%d), %d layers\n",

? ? ? ? ? ? ? deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);

? ? ? ? printf("? Maximum Layered 2D Texture Size, (num) layers? 2D=(%d, %d), %d layers\n",

? ? ? ? ? ? ? deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]);

? ? ? ? printf("? Total amount of constant memory:? ? ? ? ? ? ? %lu bytes\n", deviceProp.totalConstMem);

? ? ? ? printf("? Total amount of shared memory per block:? ? ? %lu bytes\n", deviceProp.sharedMemPerBlock);

? ? ? ? printf("? Total number of registers available per block: %d\n", deviceProp.regsPerBlock);

? ? ? ? printf("? Warp size:? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? ? %d\n", deviceProp.warpSize);

? ? ? ? printf("? Maximum number of threads per multiprocessor:? %d\n", deviceProp.maxThreadsPerMultiProcessor);

? ? ? ? printf("? Maximum number of threads per block:? ? ? ? ? %d\n", deviceProp.maxThreadsPerBlock);

? ? ? ? printf("? Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",

? ? ? ? ? ? ? deviceProp.maxThreadsDim[0],

? ? ? ? ? ? ? deviceProp.maxThreadsDim[1],

? ? ? ? ? ? ? deviceProp.maxThreadsDim[2]);

? ? ? ? printf("? Max dimension size of a grid size? ? (x,y,z): (%d, %d, %d)\n",

? ? ? ? ? ? ? deviceProp.maxGridSize[0],

? ? ? ? ? ? ? deviceProp.maxGridSize[1],

? ? ? ? ? ? ? deviceProp.maxGridSize[2]);

? ? ? ? printf("? Maximum memory pitch:? ? ? ? ? ? ? ? ? ? ? ? ? %lu bytes\n", deviceProp.memPitch);

? ? ? ? printf("? Texture alignment:? ? ? ? ? ? ? ? ? ? ? ? ? ? %lu bytes\n", deviceProp.textureAlignment);

? ? ? ? printf("? Concurrent copy and kernel execution:? ? ? ? ? %s with %d copy engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);

? ? ? ? printf("? Run time limit on kernels:? ? ? ? ? ? ? ? ? ? %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");

? ? ? ? printf("? Integrated GPU sharing Host Memory:? ? ? ? ? ? %s\n", deviceProp.integrated ? "Yes" : "No");

? ? ? ? printf("? Support host page-locked memory mapping:? ? ? %s\n", deviceProp.canMapHostMemory ? "Yes" : "No");

? ? ? ? printf("? Alignment requirement for Surfaces:? ? ? ? ? ? %s\n", deviceProp.surfaceAlignment ? "Yes" : "No");

? ? ? ? printf("? Device has ECC support:? ? ? ? ? ? ? ? ? ? ? ? %s\n", deviceProp.ECCEnabled ? "Enabled" : "Disabled");

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)

? ? ? ? printf("? CUDA Device Driver Mode (TCC or WDDM):? ? ? ? %s\n", deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)" : "WDDM (Windows Display Driver Model)");

#endif

? ? ? ? printf("? Device supports Unified Addressing (UVA):? ? ? %s\n", deviceProp.unifiedAddressing ? "Yes" : "No");

? ? ? ? printf("? Supports Cooperative Kernel Launch:? ? ? ? ? ? %s\n", deviceProp.cooperativeLaunch ? "Yes" : "No");

? ? ? ? printf("? Supports MultiDevice Co-op Kernel Launch:? ? ? %s\n", deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No");

? ? ? ? printf("? Device PCI Domain ID / Bus ID / location ID:? %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);

? ? ? ? const char *sComputeMode[] =

? ? ? ? {

? ? ? ? ? ? "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",

? ? ? ? ? ? "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",

? ? ? ? ? ? "Prohibited (no host thread can use ::cudaSetDevice() with this device)",

? ? ? ? ? ? "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",

? ? ? ? ? ? "Unknown",

? ? ? ? ? ? NULL

? ? ? ? };

? ? ? ? printf("? Compute Mode:\n");

? ? ? ? printf("? ? < %s >\n", sComputeMode[deviceProp.computeMode]);

? ? }

? ? // If there are 2 or more GPUs, query to determine whether RDMA is supported

? ? if (deviceCount >= 2)

? ? {

? ? ? ? cudaDeviceProp prop[64];

? ? ? ? int gpuid[64]; // we want to find the first two GPUs that can support P2P

? ? ? ? int gpu_p2p_count = 0;

? ? ? ? for (int i=0; i < deviceCount; i++)

? ? ? ? {

? ? ? ? ? ? checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));

? ? ? ? ? ? // Only boards based on Fermi or later can support P2P

? ? ? ? ? ? if ((prop[i].major >= 2)

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)

? ? ? ? ? ? ? ? // on Windows (64-bit), the Tesla Compute Cluster driver for windows must be enabled to support this

? ? ? ? ? ? ? ? && prop[i].tccDriver

#endif

? ? ? ? ? ? ? )

? ? ? ? ? ? {

? ? ? ? ? ? ? ? // This is an array of P2P capable GPUs

? ? ? ? ? ? ? ? gpuid[gpu_p2p_count++] = i;

? ? ? ? ? ? }

? ? ? ? }

? ? ? ? // Show all the combinations of support P2P GPUs

? ? ? ? int can_access_peer;

? ? ? ? if (gpu_p2p_count >= 2)

? ? ? ? {

? ? ? ? ? ? for (int i = 0; i < gpu_p2p_count; i++)

? ? ? ? ? ? {

? ? ? ? ? ? ? ? for (int j = 0; j < gpu_p2p_count; j++)

? ? ? ? ? ? ? ? {

? ? ? ? ? ? ? ? ? ? if (gpuid[i] == gpuid[j])

? ? ? ? ? ? ? ? ? ? {

? ? ? ? ? ? ? ? ? ? ? ? continue;

? ? ? ? ? ? ? ? ? ? }

? ? ? ? ? ? ? ? ? ? checkCudaErrors(cudaDeviceCanAccessPeer(&can_access_peer, gpuid[i], gpuid[j]));

? ? ? ? ? ? ? ? ? ? ? ? printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n", prop[gpuid[i]].name, gpuid[i],

? ? ? ? ? ? ? ? ? ? ? ? ? prop[gpuid[j]].name, gpuid[j] ,

? ? ? ? ? ? ? ? ? ? ? ? ? can_access_peer ? "Yes" : "No");

? ? ? ? ? ? ? ? }

? ? ? ? ? ? }

? ? ? ? }

? ? }

? ? // csv masterlog info

? ? // *****************************

? ? // exe and CUDA driver name

? ? printf("\n");

? ? std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";

? ? char cTemp[16];

? ? // driver version

? ? sProfileString += ", CUDA Driver Version = ";

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)

? ? sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10);

#else

? ? sprintf(cTemp, "%d.%d", driverVersion/1000, (driverVersion%100)/10);

#endif

? ? sProfileString +=? cTemp;

? ? // Runtime version

? ? sProfileString += ", CUDA Runtime Version = ";

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)

? ? sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);

#else

? ? sprintf(cTemp, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);

#endif

? ? sProfileString +=? cTemp;

? ? // Device count

? ? sProfileString += ", NumDevs = ";

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)

? ? sprintf_s(cTemp, 10, "%d", deviceCount);

#else

? ? sprintf(cTemp, "%d", deviceCount);

#endif

? ? sProfileString += cTemp;

? ? sProfileString += "\n";

? ? printf("%s", sProfileString.c_str());

? ? printf("Result = PASS\n");

? ? // finish

? ? exit(EXIT_SUCCESS);

}

?著作權(quán)歸作者所有,轉(zhuǎn)載或內(nèi)容合作請聯(lián)系作者
  • 序言:七十年代末,一起剝皮案震驚了整個濱河市,隨后出現(xiàn)的幾起案子,更是在濱河造成了極大的恐慌,老刑警劉巖请垛,帶你破解...
    沈念sama閱讀 212,816評論 6 492
  • 序言:濱河連續(xù)發(fā)生了三起死亡事件,死亡現(xiàn)場離奇詭異,居然都是意外死亡枫绅,警方通過查閱死者的電腦和手機(jī),發(fā)現(xiàn)死者居然都...
    沈念sama閱讀 90,729評論 3 385
  • 文/潘曉璐 我一進(jìn)店門硼端,熙熙樓的掌柜王于貴愁眉苦臉地迎上來并淋,“玉大人,你說我怎么就攤上這事珍昨∠氐ⅲ” “怎么了?”我有些...
    開封第一講書人閱讀 158,300評論 0 348
  • 文/不壞的土叔 我叫張陵镣典,是天一觀的道長兔毙。 經(jīng)常有香客問我,道長兄春,這世上最難降的妖魔是什么耀盗? 我笑而不...
    開封第一講書人閱讀 56,780評論 1 285
  • 正文 為了忘掉前任倘要,我火速辦了婚禮互广,結(jié)果婚禮上倦青,老公的妹妹穿的比我還像新娘。我一直安慰自己芜茵,他們只是感情好蜻懦,可當(dāng)我...
    茶點(diǎn)故事閱讀 65,890評論 6 385
  • 文/花漫 我一把揭開白布。 她就那樣靜靜地躺著夕晓,像睡著了一般宛乃。 火紅的嫁衣襯著肌膚如雪。 梳的紋絲不亂的頭發(fā)上蒸辆,一...
    開封第一講書人閱讀 50,084評論 1 291
  • 那天征炼,我揣著相機(jī)與錄音,去河邊找鬼躬贡。 笑死谆奥,一個胖子當(dāng)著我的面吹牛,可吹牛的內(nèi)容都是我干的拂玻。 我是一名探鬼主播酸些,決...
    沈念sama閱讀 39,151評論 3 410
  • 文/蒼蘭香墨 我猛地睜開眼宰译,長吁一口氣:“原來是場噩夢啊……” “哼!你這毒婦竟也來了魄懂?” 一聲冷哼從身側(cè)響起沿侈,我...
    開封第一講書人閱讀 37,912評論 0 268
  • 序言:老撾萬榮一對情侶失蹤,失蹤者是張志新(化名)和其女友劉穎市栗,沒想到半個月后缀拭,有當(dāng)?shù)厝嗽跇淞掷锇l(fā)現(xiàn)了一具尸體,經(jīng)...
    沈念sama閱讀 44,355評論 1 303
  • 正文 獨(dú)居荒郊野嶺守林人離奇死亡填帽,尸身上長有42處帶血的膿包…… 初始之章·張勛 以下內(nèi)容為張勛視角 年9月15日...
    茶點(diǎn)故事閱讀 36,666評論 2 327
  • 正文 我和宋清朗相戀三年蛛淋,在試婚紗的時候發(fā)現(xiàn)自己被綠了。 大學(xué)時的朋友給我發(fā)了我未婚夫和他白月光在一起吃飯的照片篡腌。...
    茶點(diǎn)故事閱讀 38,809評論 1 341
  • 序言:一個原本活蹦亂跳的男人離奇死亡褐荷,死狀恐怖,靈堂內(nèi)的尸體忽然破棺而出嘹悼,到底是詐尸還是另有隱情叛甫,我是刑警寧澤,帶...
    沈念sama閱讀 34,504評論 4 334
  • 正文 年R本政府宣布绘迁,位于F島的核電站,受9級特大地震影響卒密,放射性物質(zhì)發(fā)生泄漏缀台。R本人自食惡果不足惜,卻給世界環(huán)境...
    茶點(diǎn)故事閱讀 40,150評論 3 317
  • 文/蒙蒙 一哮奇、第九天 我趴在偏房一處隱蔽的房頂上張望膛腐。 院中可真熱鬧,春花似錦鼎俘、人聲如沸哲身。這莊子的主人今日做“春日...
    開封第一講書人閱讀 30,882評論 0 21
  • 文/蒼蘭香墨 我抬頭看了看天上的太陽勘天。三九已至,卻和暖如春捉邢,著一層夾襖步出監(jiān)牢的瞬間脯丝,已是汗流浹背。 一陣腳步聲響...
    開封第一講書人閱讀 32,121評論 1 267
  • 我被黑心中介騙來泰國打工伏伐, 沒想到剛下飛機(jī)就差點(diǎn)兒被人妖公主榨干…… 1. 我叫王不留宠进,地道東北人。 一個月前我還...
    沈念sama閱讀 46,628評論 2 362
  • 正文 我出身青樓藐翎,卻偏偏與公主長得像材蹬,于是被迫代替她去往敵國和親实幕。 傳聞我的和親對象是個殘疾皇子,可洞房花燭夜當(dāng)晚...
    茶點(diǎn)故事閱讀 43,724評論 2 351

推薦閱讀更多精彩內(nèi)容