我是靠谱客的博主 超帅夏天,这篇文章主要介绍HCP_S1200数据集下载,现在分享给大家,希望可以做个参考。

HCP_dataset
Mapping the human brain is one of the great scientific challenges of the 21st century.
The HCP (Human Connectome Project) is mapping the healthy human connectome by collecting and freely distributing neuroimaging and behavioral data on 1,200 normal young adults, aged 22-35. Using greatly improved methods for data acquisition, analysis, and sharing, the HCP has provided the scientific community with data and discoveries that greatly enhance our understanding of human brain structure, function, and connectivity and their relationships to behavior. It also provides a treasure trove of neuroimaging and behavioral data at an unprecedented level of detail.
About Questions

import boto3
import os
import logging
import datetime
from boto3.session import Session

# --- Configuration ---------------------------------------------------------
bucketName = 'hcp-openaccess'   # HCP open-access S3 bucket
prefix = 'HCP_1200'             # S1200 release key prefix
outputPath = '/home/ec2-user/SageMaker/HCP_dataset'
# SECURITY: never hard-code (or publish) real AWS credentials. These keys
# should be rotated and loaded from the environment or an AWS profile.
access_key = 'AKIAXO65CT57HVRCTMH4'  # [your aws_access_key]
secret_key = 'XA6zzMixA9ci15pEZ24zjgLCOuoiWdiSRUdaPDkv'  # [your aws_secret_key]

# Make sure the local download directory exists.
if not os.path.exists(outputPath):
    os.makedirs(outputPath)

# Open an authenticated S3 session.
session = Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)
s3 = session.resource('s3')
theTime = datetime.datetime.now().strftime('%Y_%m_%d-%H-%M_%S')  # run timestamp (currently unused)

# --- Logging: DEBUG on the logger, INFO and above to the console -----------
logger = logging.getLogger('script')
formatter = logging.Formatter('%(asctime)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger.setLevel(level=logging.DEBUG)
logger.propagate = False

stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)

logger.addHandler(stream_handler)

bucket = s3.Bucket(bucketName)
logger.info('Bucket built!')

# Download every left-right phase-encoded task-fMRI run (*_LR.nii.gz) for
# each subject listed in subjects.txt, flattening the S3 key into
# <subject>_<task>.nii.gz under outputPath. Skips files already on disk.
with open('./subjects.txt', 'r') as fr:
    for subject_number in fr.readlines():
        subject_number = subject_number.strip()
        # List this subject's task-fMRI objects, keep only the *_LR runs.
        keyList = bucket.objects.filter(
            Prefix=prefix + '/{}/MNINonLinear/Results/tfMRI'.format(subject_number))
        keyList = [key.key for key in keyList]
        keyList = [x for x in keyList if '_LR.nii.gz' in x]
        totalNumber = len(keyList)
        for idx, tarPath in enumerate(keyList):
            # Flatten e.g. .../tfMRI_EMOTION_LR/tfMRI_EMOTION_LR.nii.gz into
            # <subject>_EMOTION.nii.gz in the flat output directory.
            downloadPath1 = os.path.join(
                outputPath,
                subject_number + '_' + tarPath.split('/')[-1].split('_')[1] + '.nii.gz')
            try:
                if not os.path.exists(downloadPath1):
                    bucket.download_file(tarPath, downloadPath1)
                    logger.info('%s: %s downloaded! %d/%d', subject_number,
                                tarPath.split('/')[-1], idx + 1, totalNumber)
                else:
                    logger.info('%s: %s already exists! %d/%d', subject_number,
                                tarPath.split('/')[-1], idx + 1, totalNumber)
            except Exception as exc:
                # Best-effort: log and continue with the next file.
                logger.error('{}'.format(str(exc)))
        logger.info('%s completed!', subject_number)

# Write the expected local filename of every *_LR run to dt1.txt, one name
# per line, so downstream model code knows which files to load.
with open('./subjects.txt', 'r') as fr:
    with open('/home/ec2-user/SageMaker/Models_HCP/dt1.txt', 'w') as fr2:
        for subject_number in fr.readlines():
            subject_number = subject_number.strip()
            keyList = bucket.objects.filter(
                Prefix=prefix + '/{}/MNINonLinear/Results/tfMRI'.format(subject_number))
            keyList = [key.key for key in keyList]
            keyList = [x for x in keyList if '_LR.nii.gz' in x]
            for tarPath in keyList:
                # BUG FIX: the original wrote '.nii.gzn' (the '\n' lost its
                # backslash), producing one long line instead of one name
                # per line.
                fr2.write(subject_number + '_' + tarPath.split('/')[-1].split('_')[1] + '.nii.gz\n')

# Decompress every .gz in the current directory. Iterate the glob directly
# instead of parsing `ls` output (which breaks on unusual filenames).
# NOTE: `gzip -d` deletes the compressed source on success, so the original
# second `rm` pass was redundant (its glob would already match nothing).
for f in *.gz; do
    [ -e "$f" ] || continue  # glob matched nothing: skip the literal '*.gz'
    gzip -d "$f"
done

The final download script

import boto3
import os
import logging
import datetime
from boto3.session import Session

# --- Configuration ---------------------------------------------------------
bucketName = 'hcp-openaccess'   # HCP open-access S3 bucket
prefix = 'HCP_1200'             # S1200 release key prefix
outputPath = '/home/ec2-user/SageMaker/HCP_dataset'
# SECURITY: never hard-code (or publish) real AWS credentials. These keys
# should be rotated and loaded from the environment or an AWS profile.
access_key = 'AKIAXO65CT57HVRCTMH4'  # [your aws_access_key]
secret_key = 'XA6zzMixA9ci15pEZ24zjgLCOuoiWdiSRUdaPDkv'  # [your aws_secret_key]

# Make sure the local download directory exists.
if not os.path.exists(outputPath):
    os.makedirs(outputPath)

# Open an authenticated S3 session.
session = Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key)
s3 = session.resource('s3')
theTime = datetime.datetime.now().strftime('%Y_%m_%d-%H-%M_%S')  # run timestamp (currently unused)

# --- Logging: DEBUG on the logger, INFO and above to the console -----------
logger = logging.getLogger('script')
formatter = logging.Formatter('%(asctime)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger.setLevel(level=logging.DEBUG)
logger.propagate = False

stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(formatter)

logger.addHandler(stream_handler)

bucket = s3.Bucket(bucketName)
logger.info('Bucket built!')

# Download the task contrast maps (cope1.dtseries.nii, excluding the
# variance files) for each subject in subjects.txt, naming the local copy
# <subject>_<task>_<original filename>. Skips files already on disk.
with open('./subjects.txt', 'r') as fr:
    for subject_number in fr.readlines():
        subject_number = subject_number.strip()
        keyList = bucket.objects.filter(
            Prefix=prefix + '/{}/MNINonLinear/Results/tfMRI_'.format(subject_number))
        keyList = [key.key for key in keyList]
        keyList = [x for x in keyList if 'cope1.dtseries.nii' in x and 'var' not in x]
        totalNumber = len(keyList)
        for idx, tarPath in enumerate(keyList):
            # NOTE(review): split('/')[5] assumes a fixed key depth where
            # the 6th component is the task-named directory — verify against
            # the actual S3 key layout before changing.
            downloadPath1 = os.path.join(
                outputPath,
                subject_number + '_' + tarPath.split('/')[5].split('_')[1]
                + '_' + tarPath.split('/')[-1])
            try:
                if not os.path.exists(downloadPath1):
                    bucket.download_file(tarPath, downloadPath1)
                    logger.info('%s: %s downloaded! %d/%d', subject_number,
                                tarPath.split('/')[-1], idx + 1, totalNumber)
                else:
                    logger.info('%s: %s already exists! %d/%d', subject_number,
                                tarPath.split('/')[-1], idx + 1, totalNumber)
            except Exception as exc:
                # Best-effort: log and continue with the next file.
                logger.error('{}'.format(str(exc)))
        logger.info('%s completed!', subject_number)

import os

file_path = '/home/ec2-user/SageMaker/HCP_dataset'
# os.listdir returns the entries of the download directory (non-recursive).
path_list = os.listdir(file_path)
print(path_list)
# (removed unused `path_name = []` left over from an earlier draft)

def saveList(pathName, out_path='/home/ec2-user/SageMaker/Models_HCP/dt1.txt'):
    """Write one filename per line to *out_path*.

    Args:
        pathName: iterable of filenames to record.
        out_path: destination text file; defaults to the original
            hard-coded dt1.txt location so existing callers are unchanged.
    """
    with open(out_path, 'w') as f:
        for file_name in pathName:
            # BUG FIX: the original wrote a literal 'n' ("n" with the
            # backslash lost) instead of a newline.
            f.write(file_name + '\n')

saveList(path_list)

test:

import torch

# Sanity-check the PyTorch / CUDA installation.
print(torch.cuda.is_available())
ngpu = 1
# Use the first GPU when CUDA is usable, otherwise fall back to the CPU.
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")
print(device)
# BUG FIX: the original called get_device_name(0) and .cuda() unconditionally,
# which raises RuntimeError on CPU-only machines; guard the GPU-only calls.
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))
    print(torch.rand(3, 3).cuda())

最后

以上就是超帅夏天最近收集整理的关于HCP_S1200数据集下载的全部内容,更多相关HCP_S1200数据集下载内容请搜索靠谱客的其他文章。

本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
点赞(51)

评论列表共有 0 条评论

立即
投稿
返回
顶部