Dataset Viewer
Auto-converted to Parquet
problem_id: string
source: string
task_type: string
in_source_id: string
prompt: string
golden_diff: string
verification_info: string
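Each row pairs a SWE-bench-style prompt (an issue statement plus a partial code base) with a reference patch (golden_diff) and a verification_info JSON blob. Because the rows are auto-converted to Parquet, a dataset with this schema can usually be pulled straight into Python. The sketch below is illustrative only: the hub id "rasdani/github-patches" is borrowed from the source column and may not be the actual repository id of this dataset, and the "train" split name is an assumption.

# Minimal sketch: load the Parquet-backed dataset and inspect one row.
# The hub id is borrowed from the `source` column and the split name is
# assumed; both may need adjusting for the real dataset.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"])         # e.g. "gh_patches_debug_36408"
print(row["in_source_id"])       # e.g. "mne-tools__mne-bids-74"
print(row["prompt"][:300])       # issue statement + partial code base
print(row["golden_diff"][:300])  # reference patch in unified diff format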
problem_id: gh_patches_debug_36408
source: rasdani/github-patches
task_type: git_diff
in_source_id: mne-tools__mne-bids-74
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove .gz extension for fif: It's no longer part of the validator Throughout the current state of MNE BIDS, the file ending `.gz` is used as an identifier for FIF files: https://github.com/mne-tools/mne-bids/blob/c73ce744d30be87645e1648754b488f7572307f3/mne_bids/meg_bids.py#L33-L34 Can we change this to `fif.gz`? I am just concerned, because other files can be `.gz` and have nothing to do with FIF. </issue> <code> [start of mne_bids/io.py] 1 """Check whether a file format is supported by BIDS and then load it.""" 2 # Authors: Mainak Jas <[email protected]> 3 # Alexandre Gramfort <[email protected]> 4 # Teon Brooks <[email protected]> 5 # Chris Holdgraf <[email protected]> 6 # Stefan Appelhoff <[email protected]> 7 # 8 # License: BSD (3-clause) 9 from mne import io 10 import os 11 12 ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds'] 13 14 15 def _parse_ext(raw_fname, verbose=False): 16 """Split a filename into its name and extension.""" 17 fname, ext = os.path.splitext(raw_fname) 18 # BTi data is the only file format that does not have a file extension 19 if ext == '': 20 if verbose is True: 21 print('Found no extension for raw file, assuming "BTi" format and ' 22 'appending extension .pdf') 23 ext = '.pdf' 24 return fname, ext 25 26 27 def _read_raw(raw_fname, electrode=None, hsp=None, hpi=None, config=None, 28 verbose=None): 29 """Read a raw file into MNE, making inferences based on extension.""" 30 fname, ext = _parse_ext(raw_fname) 31 32 # MEG File Types 33 # -------------- 34 # KIT systems 35 if ext in ['.con', '.sqd']: 36 raw = io.read_raw_kit(raw_fname, elp=electrode, hsp=hsp, 37 mrk=hpi, preload=False) 38 39 # Neuromag or converted-to-fif systems 40 elif ext in ['.fif', '.gz']: 41 raw = io.read_raw_fif(raw_fname, preload=False) 42 43 # BTi systems 44 elif ext == '.pdf': 45 if os.path.isfile(raw_fname): 46 raw = io.read_raw_bti(raw_fname, config_fname=config, 47 head_shape_fname=hsp, 48 preload=False, verbose=verbose) 49 50 # CTF systems 51 elif ext == '.ds': 52 raw = io.read_raw_ctf(raw_fname) 53 54 # No supported data found ... 
55 # --------------------------- 56 else: 57 raise ValueError("Raw file name extension must be one of %\n" 58 "Got %" % (ALLOWED_EXTENSIONS, ext)) 59 return raw 60 [end of mne_bids/io.py] [start of mne_bids/mne_bids.py] 1 """Make BIDS compatible directory structures and infer meta data from MNE.""" 2 # Authors: Mainak Jas <[email protected]> 3 # Alexandre Gramfort <[email protected]> 4 # Teon Brooks <[email protected]> 5 # Chris Holdgraf <[email protected]> 6 # Stefan Appelhoff <[email protected]> 7 # 8 # License: BSD (3-clause) 9 10 import os 11 import shutil as sh 12 import pandas as pd 13 from collections import defaultdict, OrderedDict 14 15 import numpy as np 16 from mne.io.constants import FIFF 17 from mne.io.pick import channel_type 18 from mne.io import BaseRaw 19 from mne.channels.channels import _unit2human 20 from mne.externals.six import string_types 21 22 from datetime import datetime 23 from warnings import warn 24 25 from .utils import (make_bids_filename, make_bids_folders, 26 make_dataset_description, _write_json, 27 _read_events, _mkdir_p) 28 from .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS) 29 30 31 ALLOWED_KINDS = ['meg', 'ieeg'] 32 orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS', 33 '.pdf': 'ALS', '.ds': 'ALS'} 34 35 units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm', 36 '.ds': 'cm'} 37 38 manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa', 39 '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes', 40 '.ds': 'CTF'} 41 42 43 def _channels_tsv(raw, fname, verbose): 44 """Create a channels.tsv file and save it. 45 46 Parameters 47 ---------- 48 raw : instance of Raw 49 The data as MNE-Python Raw object. 50 fname : str 51 Filename to save the channels.tsv to. 52 verbose : bool 53 Set verbose output to true or false. 54 55 """ 56 map_chs = defaultdict(lambda: 'OTHER') 57 map_chs.update(grad='MEGGRAD', mag='MEGMAG', stim='TRIG', eeg='EEG', 58 ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', misc='MISC', 59 resp='RESPONSE', ref_meg='REFMEG') 60 map_desc = defaultdict(lambda: 'Other type of channel') 61 map_desc.update(grad='Gradiometer', mag='Magnetometer', 62 stim='Trigger', 63 eeg='ElectroEncephaloGram', 64 ecog='Electrocorticography', 65 seeg='StereoEEG', 66 ecg='ElectroCardioGram', 67 eog='ElectrOculoGram', misc='Miscellaneous', 68 ref_meg='Reference channel') 69 70 status, ch_type, description = list(), list(), list() 71 for idx, ch in enumerate(raw.info['ch_names']): 72 status.append('bad' if ch in raw.info['bads'] else 'good') 73 ch_type.append(map_chs[channel_type(raw.info, idx)]) 74 description.append(map_desc[channel_type(raw.info, idx)]) 75 low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass']) 76 units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']] 77 units = [u if u not in ['NA'] else 'n/a' for u in units] 78 n_channels = raw.info['nchan'] 79 sfreq = raw.info['sfreq'] 80 81 df = pd.DataFrame(OrderedDict([ 82 ('name', raw.info['ch_names']), 83 ('type', ch_type), 84 ('units', units), 85 ('description', description), 86 ('sampling_frequency', np.full((n_channels), sfreq)), 87 ('low_cutoff', np.full((n_channels), low_cutoff)), 88 ('high_cutoff', np.full((n_channels), high_cutoff)), 89 ('status', status)])) 90 df.to_csv(fname, sep='\t', index=False, na_rep='n/a') 91 92 if verbose: 93 print(os.linesep + "Writing '%s'..." 
% fname + os.linesep) 94 print(df.head()) 95 96 return fname 97 98 99 def _events_tsv(events, raw, fname, trial_type, verbose): 100 """Create an events.tsv file and save it. 101 102 This function will write the mandatory 'onset', and 'duration' columns as 103 well as the optional 'event_value' and 'event_sample'. The 'event_value' 104 corresponds to the marker value as found in the TRIG channel of the 105 recording. In addition, the 'trial_type' field can be written. 106 107 Parameters 108 ---------- 109 events : array, shape = (n_events, 3) 110 The first column contains the event time in samples and the third 111 column contains the event id. The second column is ignored for now but 112 typically contains the value of the trigger channel either immediately 113 before the event or immediately after. 114 raw : instance of Raw 115 The data as MNE-Python Raw object. 116 fname : str 117 Filename to save the events.tsv to. 118 event_id : dict | None 119 Dictionary mapping a brief description key to an event id (value). For 120 example {'Go': 1, 'No Go': 2}. 121 verbose : bool 122 Set verbose output to true or false. 123 124 Notes 125 ----- 126 The function writes durations of zero for each event. 127 128 """ 129 # Start by filling all data that we know into a df 130 first_samp = raw.first_samp 131 sfreq = raw.info['sfreq'] 132 events[:, 0] -= first_samp 133 134 data = OrderedDict([('onset', events[:, 0]), 135 ('duration', np.zeros(events.shape[0])), 136 ('trial_type', events[:, 2]), 137 ('event_value', events[:, 2]), 138 ('event_sample', events[:, 0])]) 139 140 df = pd.DataFrame.from_dict(data) 141 142 # Now check if trial_type is specified or should be removed 143 if trial_type: 144 trial_type_map = {v: k for k, v in trial_type.items()} 145 df.trial_type = df.trial_type.map(trial_type_map) 146 else: 147 df.drop(labels=['trial_type'], axis=1, inplace=True) 148 149 # Onset column needs to be specified in seconds 150 df.onset /= sfreq 151 152 # Save to file 153 df.to_csv(fname, sep='\t', index=False, na_rep='n/a') 154 if verbose: 155 print(os.linesep + "Writing '%s'..." % fname + os.linesep) 156 print(df.head()) 157 158 return fname 159 160 161 def _scans_tsv(raw, raw_fname, fname, verbose): 162 """Create a scans.tsv file and save it. 163 164 Parameters 165 ---------- 166 raw : instance of Raw 167 The data as MNE-Python Raw object. 168 raw_fname : str 169 Relative path to the raw data file. 170 fname : str 171 Filename to save the scans.tsv to. 172 verbose : bool 173 Set verbose output to true or false. 174 175 """ 176 # get MEASurement date from the data info 177 meas_date = raw.info['meas_date'] 178 if isinstance(meas_date, (np.ndarray, list)): 179 meas_date = meas_date[0] 180 181 if meas_date is None: 182 acq_time = 'n/a' 183 else: 184 acq_time = datetime.fromtimestamp( 185 meas_date).strftime('%Y-%m-%dT%H:%M:%S') 186 187 df = pd.DataFrame(data={'filename': ['%s' % raw_fname], 188 'acq_time': [acq_time]}, 189 columns=['filename', 'acq_time']) 190 191 df.to_csv(fname, sep='\t', index=False, na_rep='n/a') 192 193 if verbose: 194 print(os.linesep + "Writing '%s'..." % fname + os.linesep) 195 print(df.head()) 196 197 return fname 198 199 200 def _coordsystem_json(raw, unit, orient, manufacturer, fname, verbose): 201 """Create a coordsystem.json file and save it. 202 203 Parameters 204 ---------- 205 raw : instance of Raw 206 The data as MNE-Python Raw object. 207 unit : str 208 Units to be used in the coordsystem specification. 
209 orient : str 210 Used to define the coordinate system for the head coils. 211 manufacturer : str 212 Used to define the coordinate system for the MEG sensors. 213 fname : str 214 Filename to save the coordsystem.json to. 215 verbose : bool 216 Set verbose output to true or false. 217 218 """ 219 dig = raw.info['dig'] 220 coords = dict() 221 fids = {d['ident']: d for d in dig if d['kind'] == 222 FIFF.FIFFV_POINT_CARDINAL} 223 if fids: 224 if FIFF.FIFFV_POINT_NASION in fids: 225 coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist() 226 if FIFF.FIFFV_POINT_LPA in fids: 227 coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist() 228 if FIFF.FIFFV_POINT_RPA in fids: 229 coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist() 230 231 hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI} 232 if hpi: 233 for ident in hpi.keys(): 234 coords['coil%d' % ident] = hpi[ident]['r'].tolist() 235 236 coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))]) 237 if len(coord_frame) > 1: 238 err = 'All HPI and Fiducials must be in the same coordinate frame.' 239 raise ValueError(err) 240 241 fid_json = {'MEGCoordinateSystem': manufacturer, 242 'MEGCoordinateUnits': unit, # XXX validate this 243 'HeadCoilCoordinates': coords, 244 'HeadCoilCoordinateSystem': orient, 245 'HeadCoilCoordinateUnits': unit # XXX validate this 246 } 247 _write_json(fid_json, fname) 248 249 return fname 250 251 252 def _sidecar_json(raw, task, manufacturer, fname, kind, 253 verbose=True): 254 """Create a sidecar json file depending on the kind and save it. 255 256 The sidecar json file provides meta data about the data of a certain kind. 257 258 Parameters 259 ---------- 260 raw : instance of Raw 261 The data as MNE-Python Raw object. 262 task : str 263 Name of the task the data is based on. 264 manufacturer : str 265 Manufacturer of the acquisition system. For MEG also used to define the 266 coordinate system for the MEG sensors. 267 fname : str 268 Filename to save the sidecar json to. 269 kind : str 270 Type of the data as in ALLOWED_KINDS. 271 verbose : bool 272 Set verbose output to true or false. Defaults to true. 
273 274 """ 275 sfreq = raw.info['sfreq'] 276 powerlinefrequency = raw.info.get('line_freq', None) 277 if powerlinefrequency is None: 278 warn('No line frequency found, defaulting to 50 Hz') 279 powerlinefrequency = 50 280 281 n_megchan = len([ch for ch in raw.info['chs'] 282 if ch['kind'] == FIFF.FIFFV_MEG_CH]) 283 n_megrefchan = len([ch for ch in raw.info['chs'] 284 if ch['kind'] == FIFF.FIFFV_REF_MEG_CH]) 285 n_eegchan = len([ch for ch in raw.info['chs'] 286 if ch['kind'] == FIFF.FIFFV_EEG_CH]) 287 n_ecogchan = len([ch for ch in raw.info['chs'] 288 if ch['kind'] == FIFF.FIFFV_ECOG_CH]) 289 n_seegchan = len([ch for ch in raw.info['chs'] 290 if ch['kind'] == FIFF.FIFFV_SEEG_CH]) 291 n_eogchan = len([ch for ch in raw.info['chs'] 292 if ch['kind'] == FIFF.FIFFV_EOG_CH]) 293 n_ecgchan = len([ch for ch in raw.info['chs'] 294 if ch['kind'] == FIFF.FIFFV_ECG_CH]) 295 n_emgchan = len([ch for ch in raw.info['chs'] 296 if ch['kind'] == FIFF.FIFFV_EMG_CH]) 297 n_miscchan = len([ch for ch in raw.info['chs'] 298 if ch['kind'] == FIFF.FIFFV_MISC_CH]) 299 n_stimchan = len([ch for ch in raw.info['chs'] 300 if ch['kind'] == FIFF.FIFFV_STIM_CH]) 301 302 # Define modality-specific JSON dictionaries 303 ch_info_json_common = [ 304 ('TaskName', task), 305 ('Manufacturer', manufacturer), 306 ('PowerLineFrequency', powerlinefrequency)] 307 ch_info_json_meg = [ 308 ('SamplingFrequency', sfreq), 309 ("DewarPosition", "XXX"), 310 ("DigitizedLandmarks", False), 311 ("DigitizedHeadPoints", False), 312 ("SoftwareFilters", "n/a"), 313 ('MEGChannelCount', n_megchan), 314 ('MEGREFChannelCount', n_megrefchan)] 315 ch_info_json_ieeg = [ 316 ('ECOGChannelCount', n_ecogchan), 317 ('SEEGChannelCount', n_seegchan)] 318 ch_info_ch_counts = [ 319 ('EEGChannelCount', n_eegchan), 320 ('EOGChannelCount', n_eogchan), 321 ('ECGChannelCount', n_ecgchan), 322 ('EMGChannelCount', n_emgchan), 323 ('MiscChannelCount', n_miscchan), 324 ('TriggerChannelCount', n_stimchan)] 325 326 # Stitch together the complete JSON dictionary 327 ch_info_json = ch_info_json_common 328 if kind == 'meg': 329 append_kind_json = ch_info_json_meg 330 elif kind == 'ieeg': 331 append_kind_json = ch_info_json_ieeg 332 else: 333 raise ValueError('Unexpected "kind": {}' 334 ' Use one of: {}'.format(kind, ALLOWED_KINDS)) 335 336 ch_info_json += append_kind_json 337 ch_info_json += ch_info_ch_counts 338 ch_info_json = OrderedDict(ch_info_json) 339 340 _write_json(ch_info_json, fname, verbose=verbose) 341 return fname 342 343 344 def raw_to_bids(subject_id, task, raw_file, output_path, session_id=None, 345 run=None, kind='meg', events_data=None, event_id=None, 346 hpi=None, electrode=None, hsp=None, config=None, 347 overwrite=True, verbose=True): 348 """Walk over a folder of files and create BIDS compatible folder. 349 350 Parameters 351 ---------- 352 subject_id : str 353 The subject name in BIDS compatible format ('01', '02', etc.) 354 task : str 355 Name of the task the data is based on. 356 raw_file : str | instance of mne.Raw 357 The raw data. If a string, it is assumed to be the path to the raw data 358 file. Otherwise it must be an instance of mne.Raw 359 output_path : str 360 The path of the BIDS compatible folder 361 session_id : str | None 362 The session name in BIDS compatible format. 363 run : int | None 364 The run number for this dataset. 365 kind : str, one of ('meg', 'ieeg') 366 The kind of data being converted. Defaults to "meg". 367 events_data : str | array | None 368 The events file. If a string, a path to the events file. 
If an array, 369 the MNE events array (shape n_events, 3). If None, events will be 370 inferred from the stim channel using `mne.find_events`. 371 event_id : dict | None 372 The event id dict used to create a 'trial_type' column in events.tsv 373 hpi : None | str | list of str 374 Marker points representing the location of the marker coils with 375 respect to the MEG Sensors, or path to a marker file. 376 If list, all of the markers will be averaged together. 377 electrode : None | str 378 Digitizer points representing the location of the fiducials and the 379 marker coils with respect to the digitized head shape, or path to a 380 file containing these points. 381 hsp : None | str | array, shape = (n_points, 3) 382 Digitizer head shape points, or path to head shape file. If more than 383 10`000 points are in the head shape, they are automatically decimated. 384 config : str | None 385 A path to the configuration file to use if the data is from a BTi 386 system. 387 overwrite : bool 388 If the file already exists, whether to overwrite it. 389 verbose : bool 390 If verbose is True, this will print a snippet of the sidecar files. If 391 False, no content will be printed. 392 393 """ 394 if isinstance(raw_file, string_types): 395 # We must read in the raw data 396 raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi, 397 config=config, verbose=verbose) 398 _, ext = _parse_ext(raw_file, verbose=verbose) 399 raw_fname = raw_file 400 elif isinstance(raw_file, BaseRaw): 401 # We got a raw mne object, get back the filename if possible 402 # Assume that if no filename attr exists, it's a fif file. 403 raw = raw_file.copy() 404 if hasattr(raw, 'filenames'): 405 _, ext = _parse_ext(raw.filenames[0], verbose=verbose) 406 raw_fname = raw.filenames[0] 407 else: 408 # FIXME: How to get the filename if no filenames attribute? 
409 raw_fname = 'unknown_file_name' 410 ext = '.fif' 411 else: 412 raise ValueError('raw_file must be an instance of str or BaseRaw, ' 413 'got %s' % type(raw_file)) 414 data_path = make_bids_folders(subject=subject_id, session=session_id, 415 kind=kind, root=output_path, 416 overwrite=overwrite, 417 verbose=verbose) 418 if session_id is None: 419 ses_path = data_path 420 else: 421 ses_path = make_bids_folders(subject=subject_id, session=session_id, 422 root=output_path, 423 overwrite=False, 424 verbose=verbose) 425 426 # create filenames 427 scans_fname = make_bids_filename( 428 subject=subject_id, session=session_id, suffix='scans.tsv', 429 prefix=ses_path) 430 431 coordsystem_fname = make_bids_filename( 432 subject=subject_id, session=session_id, 433 suffix='coordsystem.json', prefix=data_path) 434 data_meta_fname = make_bids_filename( 435 subject=subject_id, session=session_id, task=task, run=run, 436 suffix='%s.json' % kind, prefix=data_path) 437 if ext in ['.fif', '.gz', '.ds']: 438 raw_file_bids = make_bids_filename( 439 subject=subject_id, session=session_id, task=task, run=run, 440 suffix='%s%s' % (kind, ext)) 441 else: 442 raw_folder = make_bids_filename( 443 subject=subject_id, session=session_id, task=task, run=run, 444 suffix='%s' % kind) 445 raw_file_bids = make_bids_filename( 446 subject=subject_id, session=session_id, task=task, run=run, 447 suffix='%s%s' % (kind, ext), prefix=raw_folder) 448 events_tsv_fname = make_bids_filename( 449 subject=subject_id, session=session_id, task=task, 450 run=run, suffix='events.tsv', prefix=data_path) 451 channels_fname = make_bids_filename( 452 subject=subject_id, session=session_id, task=task, run=run, 453 suffix='channels.tsv', prefix=data_path) 454 455 # Read in Raw object and extract metadata from Raw object if needed 456 if kind == 'meg': 457 orient = orientation[ext] 458 unit = units[ext] 459 manufacturer = manufacturers[ext] 460 else: 461 orient = 'n/a' 462 unit = 'n/a' 463 manufacturer = 'n/a' 464 465 # save stuff 466 if kind == 'meg': 467 _scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname, 468 verbose) 469 _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname, 470 verbose) 471 472 make_dataset_description(output_path, name=" ", 473 verbose=verbose) 474 _sidecar_json(raw, task, manufacturer, data_meta_fname, kind, 475 verbose) 476 _channels_tsv(raw, channels_fname, verbose) 477 478 events = _read_events(events_data, raw) 479 if len(events) > 0: 480 _events_tsv(events, raw, events_tsv_fname, event_id, verbose) 481 482 # set the raw file name to now be the absolute path to ensure the files 483 # are placed in the right location 484 raw_file_bids = os.path.join(data_path, raw_file_bids) 485 if os.path.exists(raw_file_bids) and not overwrite: 486 raise ValueError('"%s" already exists. Please set' 487 ' overwrite to True.' 
% raw_file_bids) 488 _mkdir_p(os.path.dirname(raw_file_bids)) 489 490 if verbose: 491 print('Writing data files to %s' % raw_file_bids) 492 493 if ext not in ALLOWED_EXTENSIONS: 494 raise ValueError('ext must be in %s, got %s' 495 % (''.join(ALLOWED_EXTENSIONS), ext)) 496 497 # for FIF, we need to re-save the file to fix the file pointer 498 # for files with multiple parts 499 if ext in ['.fif', '.gz']: 500 raw.save(raw_file_bids, overwrite=overwrite) 501 elif ext == '.ds': 502 sh.copytree(raw_fname, raw_file_bids) 503 else: 504 sh.copyfile(raw_fname, raw_file_bids) 505 506 return output_path 507 [end of mne_bids/mne_bids.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mne_bids/io.py b/mne_bids/io.py --- a/mne_bids/io.py +++ b/mne_bids/io.py @@ -9,7 +9,7 @@ from mne import io import os -ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds'] +ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.pdf', '.ds'] def _parse_ext(raw_fname, verbose=False): @@ -37,7 +37,7 @@ mrk=hpi, preload=False) # Neuromag or converted-to-fif systems - elif ext in ['.fif', '.gz']: + elif ext in ['.fif']: raw = io.read_raw_fif(raw_fname, preload=False) # BTi systems diff --git a/mne_bids/mne_bids.py b/mne_bids/mne_bids.py --- a/mne_bids/mne_bids.py +++ b/mne_bids/mne_bids.py @@ -29,15 +29,13 @@ ALLOWED_KINDS = ['meg', 'ieeg'] -orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS', - '.pdf': 'ALS', '.ds': 'ALS'} +orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS', + '.ds': 'ALS'} -units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm', - '.ds': 'cm'} +units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'} manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa', - '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes', - '.ds': 'CTF'} + '.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF'} def _channels_tsv(raw, fname, verbose): @@ -434,7 +432,7 @@ data_meta_fname = make_bids_filename( subject=subject_id, session=session_id, task=task, run=run, suffix='%s.json' % kind, prefix=data_path) - if ext in ['.fif', '.gz', '.ds']: + if ext in ['.fif', '.ds']: raw_file_bids = make_bids_filename( subject=subject_id, session=session_id, task=task, run=run, suffix='%s%s' % (kind, ext)) @@ -496,7 +494,7 @@ # for FIF, we need to re-save the file to fix the file pointer # for files with multiple parts - if ext in ['.fif', '.gz']: + if ext in ['.fif']: raw.save(raw_file_bids, overwrite=overwrite) elif ext == '.ds': sh.copytree(raw_fname, raw_file_bids)
{"golden_diff": "diff --git a/mne_bids/io.py b/mne_bids/io.py\n--- a/mne_bids/io.py\n+++ b/mne_bids/io.py\n@@ -9,7 +9,7 @@\n from mne import io\n import os\n \n-ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds']\n+ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.pdf', '.ds']\n \n \n def _parse_ext(raw_fname, verbose=False):\n@@ -37,7 +37,7 @@\n mrk=hpi, preload=False)\n \n # Neuromag or converted-to-fif systems\n- elif ext in ['.fif', '.gz']:\n+ elif ext in ['.fif']:\n raw = io.read_raw_fif(raw_fname, preload=False)\n \n # BTi systems\ndiff --git a/mne_bids/mne_bids.py b/mne_bids/mne_bids.py\n--- a/mne_bids/mne_bids.py\n+++ b/mne_bids/mne_bids.py\n@@ -29,15 +29,13 @@\n \n \n ALLOWED_KINDS = ['meg', 'ieeg']\n-orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS',\n- '.pdf': 'ALS', '.ds': 'ALS'}\n+orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS',\n+ '.ds': 'ALS'}\n \n-units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm',\n- '.ds': 'cm'}\n+units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'}\n \n manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',\n- '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes',\n- '.ds': 'CTF'}\n+ '.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF'}\n \n \n def _channels_tsv(raw, fname, verbose):\n@@ -434,7 +432,7 @@\n data_meta_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s.json' % kind, prefix=data_path)\n- if ext in ['.fif', '.gz', '.ds']:\n+ if ext in ['.fif', '.ds']:\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext))\n@@ -496,7 +494,7 @@\n \n # for FIF, we need to re-save the file to fix the file pointer\n # for files with multiple parts\n- if ext in ['.fif', '.gz']:\n+ if ext in ['.fif']:\n raw.save(raw_file_bids, overwrite=overwrite)\n elif ext == '.ds':\n sh.copytree(raw_fname, raw_file_bids)\n", "issue": "Remove .gz extension for fif: It's no longer part of the validator\nThroughout the current state of MNE BIDS, the file ending `.gz` is used as an identifier for FIF files:\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/c73ce744d30be87645e1648754b488f7572307f3/mne_bids/meg_bids.py#L33-L34\r\n\r\nCan we change this to `fif.gz`? 
I am just concerned, because other files can be `.gz` and have nothing to do with FIF.\n", "before_files": [{"content": "\"\"\"Check whether a file format is supported by BIDS and then load it.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\nfrom mne import io\nimport os\n\nALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds']\n\n\ndef _parse_ext(raw_fname, verbose=False):\n \"\"\"Split a filename into its name and extension.\"\"\"\n fname, ext = os.path.splitext(raw_fname)\n # BTi data is the only file format that does not have a file extension\n if ext == '':\n if verbose is True:\n print('Found no extension for raw file, assuming \"BTi\" format and '\n 'appending extension .pdf')\n ext = '.pdf'\n return fname, ext\n\n\ndef _read_raw(raw_fname, electrode=None, hsp=None, hpi=None, config=None,\n verbose=None):\n \"\"\"Read a raw file into MNE, making inferences based on extension.\"\"\"\n fname, ext = _parse_ext(raw_fname)\n\n # MEG File Types\n # --------------\n # KIT systems\n if ext in ['.con', '.sqd']:\n raw = io.read_raw_kit(raw_fname, elp=electrode, hsp=hsp,\n mrk=hpi, preload=False)\n\n # Neuromag or converted-to-fif systems\n elif ext in ['.fif', '.gz']:\n raw = io.read_raw_fif(raw_fname, preload=False)\n\n # BTi systems\n elif ext == '.pdf':\n if os.path.isfile(raw_fname):\n raw = io.read_raw_bti(raw_fname, config_fname=config,\n head_shape_fname=hsp,\n preload=False, verbose=verbose)\n\n # CTF systems\n elif ext == '.ds':\n raw = io.read_raw_ctf(raw_fname)\n\n # No supported data found ...\n # ---------------------------\n else:\n raise ValueError(\"Raw file name extension must be one of %\\n\"\n \"Got %\" % (ALLOWED_EXTENSIONS, ext))\n return raw\n", "path": "mne_bids/io.py"}, {"content": "\"\"\"Make BIDS compatible directory structures and infer meta data from MNE.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport shutil as sh\nimport pandas as pd\nfrom collections import defaultdict, OrderedDict\n\nimport numpy as np\nfrom mne.io.constants import FIFF\nfrom mne.io.pick import channel_type\nfrom mne.io import BaseRaw\nfrom mne.channels.channels import _unit2human\nfrom mne.externals.six import string_types\n\nfrom datetime import datetime\nfrom warnings import warn\n\nfrom .utils import (make_bids_filename, make_bids_folders,\n make_dataset_description, _write_json,\n _read_events, _mkdir_p)\nfrom .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS)\n\n\nALLOWED_KINDS = ['meg', 'ieeg']\norientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS',\n '.pdf': 'ALS', '.ds': 'ALS'}\n\nunits = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm',\n '.ds': 'cm'}\n\nmanufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',\n '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes',\n '.ds': 'CTF'}\n\n\ndef _channels_tsv(raw, fname, verbose):\n \"\"\"Create a channels.tsv file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n fname : str\n Filename to save the channels.tsv to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n map_chs = defaultdict(lambda: 'OTHER')\n 
map_chs.update(grad='MEGGRAD', mag='MEGMAG', stim='TRIG', eeg='EEG',\n ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', misc='MISC',\n resp='RESPONSE', ref_meg='REFMEG')\n map_desc = defaultdict(lambda: 'Other type of channel')\n map_desc.update(grad='Gradiometer', mag='Magnetometer',\n stim='Trigger',\n eeg='ElectroEncephaloGram',\n ecog='Electrocorticography',\n seeg='StereoEEG',\n ecg='ElectroCardioGram',\n eog='ElectrOculoGram', misc='Miscellaneous',\n ref_meg='Reference channel')\n\n status, ch_type, description = list(), list(), list()\n for idx, ch in enumerate(raw.info['ch_names']):\n status.append('bad' if ch in raw.info['bads'] else 'good')\n ch_type.append(map_chs[channel_type(raw.info, idx)])\n description.append(map_desc[channel_type(raw.info, idx)])\n low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])\n units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]\n units = [u if u not in ['NA'] else 'n/a' for u in units]\n n_channels = raw.info['nchan']\n sfreq = raw.info['sfreq']\n\n df = pd.DataFrame(OrderedDict([\n ('name', raw.info['ch_names']),\n ('type', ch_type),\n ('units', units),\n ('description', description),\n ('sampling_frequency', np.full((n_channels), sfreq)),\n ('low_cutoff', np.full((n_channels), low_cutoff)),\n ('high_cutoff', np.full((n_channels), high_cutoff)),\n ('status', status)]))\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _events_tsv(events, raw, fname, trial_type, verbose):\n \"\"\"Create an events.tsv file and save it.\n\n This function will write the mandatory 'onset', and 'duration' columns as\n well as the optional 'event_value' and 'event_sample'. The 'event_value'\n corresponds to the marker value as found in the TRIG channel of the\n recording. In addition, the 'trial_type' field can be written.\n\n Parameters\n ----------\n events : array, shape = (n_events, 3)\n The first column contains the event time in samples and the third\n column contains the event id. The second column is ignored for now but\n typically contains the value of the trigger channel either immediately\n before the event or immediately after.\n raw : instance of Raw\n The data as MNE-Python Raw object.\n fname : str\n Filename to save the events.tsv to.\n event_id : dict | None\n Dictionary mapping a brief description key to an event id (value). 
For\n example {'Go': 1, 'No Go': 2}.\n verbose : bool\n Set verbose output to true or false.\n\n Notes\n -----\n The function writes durations of zero for each event.\n\n \"\"\"\n # Start by filling all data that we know into a df\n first_samp = raw.first_samp\n sfreq = raw.info['sfreq']\n events[:, 0] -= first_samp\n\n data = OrderedDict([('onset', events[:, 0]),\n ('duration', np.zeros(events.shape[0])),\n ('trial_type', events[:, 2]),\n ('event_value', events[:, 2]),\n ('event_sample', events[:, 0])])\n\n df = pd.DataFrame.from_dict(data)\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n df.trial_type = df.trial_type.map(trial_type_map)\n else:\n df.drop(labels=['trial_type'], axis=1, inplace=True)\n\n # Onset column needs to be specified in seconds\n df.onset /= sfreq\n\n # Save to file\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _scans_tsv(raw, raw_fname, fname, verbose):\n \"\"\"Create a scans.tsv file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n raw_fname : str\n Relative path to the raw data file.\n fname : str\n Filename to save the scans.tsv to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n # get MEASurement date from the data info\n meas_date = raw.info['meas_date']\n if isinstance(meas_date, (np.ndarray, list)):\n meas_date = meas_date[0]\n\n if meas_date is None:\n acq_time = 'n/a'\n else:\n acq_time = datetime.fromtimestamp(\n meas_date).strftime('%Y-%m-%dT%H:%M:%S')\n\n df = pd.DataFrame(data={'filename': ['%s' % raw_fname],\n 'acq_time': [acq_time]},\n columns=['filename', 'acq_time'])\n\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _coordsystem_json(raw, unit, orient, manufacturer, fname, verbose):\n \"\"\"Create a coordsystem.json file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n unit : str\n Units to be used in the coordsystem specification.\n orient : str\n Used to define the coordinate system for the head coils.\n manufacturer : str\n Used to define the coordinate system for the MEG sensors.\n fname : str\n Filename to save the coordsystem.json to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n dig = raw.info['dig']\n coords = dict()\n fids = {d['ident']: d for d in dig if d['kind'] ==\n FIFF.FIFFV_POINT_CARDINAL}\n if fids:\n if FIFF.FIFFV_POINT_NASION in fids:\n coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist()\n if FIFF.FIFFV_POINT_LPA in fids:\n coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist()\n if FIFF.FIFFV_POINT_RPA in fids:\n coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist()\n\n hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}\n if hpi:\n for ident in hpi.keys():\n coords['coil%d' % ident] = hpi[ident]['r'].tolist()\n\n coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))])\n if len(coord_frame) > 1:\n err = 'All HPI and Fiducials must be in the same coordinate frame.'\n raise ValueError(err)\n\n fid_json = {'MEGCoordinateSystem': manufacturer,\n 'MEGCoordinateUnits': unit, # XXX validate this\n 'HeadCoilCoordinates': coords,\n 'HeadCoilCoordinateSystem': orient,\n 
'HeadCoilCoordinateUnits': unit # XXX validate this\n }\n _write_json(fid_json, fname)\n\n return fname\n\n\ndef _sidecar_json(raw, task, manufacturer, fname, kind,\n verbose=True):\n \"\"\"Create a sidecar json file depending on the kind and save it.\n\n The sidecar json file provides meta data about the data of a certain kind.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n task : str\n Name of the task the data is based on.\n manufacturer : str\n Manufacturer of the acquisition system. For MEG also used to define the\n coordinate system for the MEG sensors.\n fname : str\n Filename to save the sidecar json to.\n kind : str\n Type of the data as in ALLOWED_KINDS.\n verbose : bool\n Set verbose output to true or false. Defaults to true.\n\n \"\"\"\n sfreq = raw.info['sfreq']\n powerlinefrequency = raw.info.get('line_freq', None)\n if powerlinefrequency is None:\n warn('No line frequency found, defaulting to 50 Hz')\n powerlinefrequency = 50\n\n n_megchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_MEG_CH])\n n_megrefchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_REF_MEG_CH])\n n_eegchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EEG_CH])\n n_ecogchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_ECOG_CH])\n n_seegchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_SEEG_CH])\n n_eogchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EOG_CH])\n n_ecgchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_ECG_CH])\n n_emgchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EMG_CH])\n n_miscchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_MISC_CH])\n n_stimchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_STIM_CH])\n\n # Define modality-specific JSON dictionaries\n ch_info_json_common = [\n ('TaskName', task),\n ('Manufacturer', manufacturer),\n ('PowerLineFrequency', powerlinefrequency)]\n ch_info_json_meg = [\n ('SamplingFrequency', sfreq),\n (\"DewarPosition\", \"XXX\"),\n (\"DigitizedLandmarks\", False),\n (\"DigitizedHeadPoints\", False),\n (\"SoftwareFilters\", \"n/a\"),\n ('MEGChannelCount', n_megchan),\n ('MEGREFChannelCount', n_megrefchan)]\n ch_info_json_ieeg = [\n ('ECOGChannelCount', n_ecogchan),\n ('SEEGChannelCount', n_seegchan)]\n ch_info_ch_counts = [\n ('EEGChannelCount', n_eegchan),\n ('EOGChannelCount', n_eogchan),\n ('ECGChannelCount', n_ecgchan),\n ('EMGChannelCount', n_emgchan),\n ('MiscChannelCount', n_miscchan),\n ('TriggerChannelCount', n_stimchan)]\n\n # Stitch together the complete JSON dictionary\n ch_info_json = ch_info_json_common\n if kind == 'meg':\n append_kind_json = ch_info_json_meg\n elif kind == 'ieeg':\n append_kind_json = ch_info_json_ieeg\n else:\n raise ValueError('Unexpected \"kind\": {}'\n ' Use one of: {}'.format(kind, ALLOWED_KINDS))\n\n ch_info_json += append_kind_json\n ch_info_json += ch_info_ch_counts\n ch_info_json = OrderedDict(ch_info_json)\n\n _write_json(ch_info_json, fname, verbose=verbose)\n return fname\n\n\ndef raw_to_bids(subject_id, task, raw_file, output_path, session_id=None,\n run=None, kind='meg', events_data=None, event_id=None,\n hpi=None, electrode=None, hsp=None, config=None,\n overwrite=True, verbose=True):\n \"\"\"Walk over a folder of files and create BIDS compatible folder.\n\n Parameters\n ----------\n subject_id : str\n The subject name in BIDS compatible format ('01', '02', 
etc.)\n task : str\n Name of the task the data is based on.\n raw_file : str | instance of mne.Raw\n The raw data. If a string, it is assumed to be the path to the raw data\n file. Otherwise it must be an instance of mne.Raw\n output_path : str\n The path of the BIDS compatible folder\n session_id : str | None\n The session name in BIDS compatible format.\n run : int | None\n The run number for this dataset.\n kind : str, one of ('meg', 'ieeg')\n The kind of data being converted. Defaults to \"meg\".\n events_data : str | array | None\n The events file. If a string, a path to the events file. If an array,\n the MNE events array (shape n_events, 3). If None, events will be\n inferred from the stim channel using `mne.find_events`.\n event_id : dict | None\n The event id dict used to create a 'trial_type' column in events.tsv\n hpi : None | str | list of str\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n electrode : None | str\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape = (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10`000 points are in the head shape, they are automatically decimated.\n config : str | None\n A path to the configuration file to use if the data is from a BTi\n system.\n overwrite : bool\n If the file already exists, whether to overwrite it.\n verbose : bool\n If verbose is True, this will print a snippet of the sidecar files. If\n False, no content will be printed.\n\n \"\"\"\n if isinstance(raw_file, string_types):\n # We must read in the raw data\n raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi,\n config=config, verbose=verbose)\n _, ext = _parse_ext(raw_file, verbose=verbose)\n raw_fname = raw_file\n elif isinstance(raw_file, BaseRaw):\n # We got a raw mne object, get back the filename if possible\n # Assume that if no filename attr exists, it's a fif file.\n raw = raw_file.copy()\n if hasattr(raw, 'filenames'):\n _, ext = _parse_ext(raw.filenames[0], verbose=verbose)\n raw_fname = raw.filenames[0]\n else:\n # FIXME: How to get the filename if no filenames attribute?\n raw_fname = 'unknown_file_name'\n ext = '.fif'\n else:\n raise ValueError('raw_file must be an instance of str or BaseRaw, '\n 'got %s' % type(raw_file))\n data_path = make_bids_folders(subject=subject_id, session=session_id,\n kind=kind, root=output_path,\n overwrite=overwrite,\n verbose=verbose)\n if session_id is None:\n ses_path = data_path\n else:\n ses_path = make_bids_folders(subject=subject_id, session=session_id,\n root=output_path,\n overwrite=False,\n verbose=verbose)\n\n # create filenames\n scans_fname = make_bids_filename(\n subject=subject_id, session=session_id, suffix='scans.tsv',\n prefix=ses_path)\n\n coordsystem_fname = make_bids_filename(\n subject=subject_id, session=session_id,\n suffix='coordsystem.json', prefix=data_path)\n data_meta_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s.json' % kind, prefix=data_path)\n if ext in ['.fif', '.gz', '.ds']:\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext))\n else:\n raw_folder = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n 
suffix='%s' % kind)\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext), prefix=raw_folder)\n events_tsv_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task,\n run=run, suffix='events.tsv', prefix=data_path)\n channels_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='channels.tsv', prefix=data_path)\n\n # Read in Raw object and extract metadata from Raw object if needed\n if kind == 'meg':\n orient = orientation[ext]\n unit = units[ext]\n manufacturer = manufacturers[ext]\n else:\n orient = 'n/a'\n unit = 'n/a'\n manufacturer = 'n/a'\n\n # save stuff\n if kind == 'meg':\n _scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname,\n verbose)\n _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname,\n verbose)\n\n make_dataset_description(output_path, name=\" \",\n verbose=verbose)\n _sidecar_json(raw, task, manufacturer, data_meta_fname, kind,\n verbose)\n _channels_tsv(raw, channels_fname, verbose)\n\n events = _read_events(events_data, raw)\n if len(events) > 0:\n _events_tsv(events, raw, events_tsv_fname, event_id, verbose)\n\n # set the raw file name to now be the absolute path to ensure the files\n # are placed in the right location\n raw_file_bids = os.path.join(data_path, raw_file_bids)\n if os.path.exists(raw_file_bids) and not overwrite:\n raise ValueError('\"%s\" already exists. Please set'\n ' overwrite to True.' % raw_file_bids)\n _mkdir_p(os.path.dirname(raw_file_bids))\n\n if verbose:\n print('Writing data files to %s' % raw_file_bids)\n\n if ext not in ALLOWED_EXTENSIONS:\n raise ValueError('ext must be in %s, got %s'\n % (''.join(ALLOWED_EXTENSIONS), ext))\n\n # for FIF, we need to re-save the file to fix the file pointer\n # for files with multiple parts\n if ext in ['.fif', '.gz']:\n raw.save(raw_file_bids, overwrite=overwrite)\n elif ext == '.ds':\n sh.copytree(raw_fname, raw_file_bids)\n else:\n sh.copyfile(raw_fname, raw_file_bids)\n\n return output_path\n", "path": "mne_bids/mne_bids.py"}]}
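The JSON ending here is row 1's verification_info: it repeats the golden_diff, the issue text, and the pre-patch file contents under "before_files". To make the relationship between the three large string columns concrete, the following hedged sketch writes the listed before_files into a scratch git repository and checks that the golden_diff applies cleanly. The key names ("before_files", "content", "path", "golden_diff") are taken from the JSON above; the helper function and the temporary-directory setup are illustrative choices, not part of the dataset.

# Hedged sketch: materialise a row's before_files and verify the golden_diff
# applies with `git apply --check`. Only the standard library and git are used.
import json
import os
import subprocess
import tempfile

def patch_applies(row):
    info = json.loads(row["verification_info"])
    with tempfile.TemporaryDirectory() as repo_dir:
        subprocess.run(["git", "init", "-q", repo_dir], check=True)
        for f in info["before_files"]:
            path = os.path.join(repo_dir, f["path"])
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "w") as fh:
                fh.write(f["content"])
        result = subprocess.run(
            ["git", "apply", "--check", "-"],
            input=row["golden_diff"],
            text=True,
            cwd=repo_dir,
        )
    return result.returncode == 0

For row 1 this should succeed, since the golden_diff shown above was generated against exactly those before_files.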
problem_id: gh_patches_debug_32737
source: rasdani/github-patches
task_type: git_diff
in_source_id: dask__dask-586
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Additional improvement for da.image.imread I think following 2 improvements make the function better. #### 1. Allow custom `imread` function. Some prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better. #### 2. Allow custom preprocessing function. In image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility. </issue> <code> [start of dask/array/image.py] 1 from glob import glob 2 import os 3 4 try: 5 from skimage.io import imread as sk_imread 6 except ImportError: 7 pass 8 9 from .core import Array 10 from ..base import tokenize 11 12 def add_leading_dimension(x): 13 return x[None, ...] 14 15 16 def imread(filename): 17 """ Read a stack of images into a dask array 18 19 Parameters 20 ---------- 21 22 filename: string 23 A globstring like 'myfile.*.png' 24 25 Example 26 ------- 27 28 >>> from dask.array.image import imread 29 >>> im = imread('2015-*-*.png') # doctest: +SKIP 30 >>> im.shape # doctest: +SKIP 31 (365, 1000, 1000, 3) 32 33 Returns 34 ------- 35 36 Dask array of all images stacked along the first dimension. All images 37 will be treated as individual chunks 38 """ 39 filenames = sorted(glob(filename)) 40 if not filenames: 41 raise ValueError("No files found under name %s" % filename) 42 43 name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) 44 45 sample = sk_imread(filenames[0]) 46 47 dsk = dict(((name, i) + (0,) * len(sample.shape), 48 (add_leading_dimension, (sk_imread, filename))) 49 for i, filename in enumerate(filenames)) 50 51 chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape) 52 53 return Array(dsk, name, chunks, sample.dtype) 54 [end of dask/array/image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dask/array/image.py b/dask/array/image.py --- a/dask/array/image.py +++ b/dask/array/image.py @@ -13,7 +13,7 @@ return x[None, ...] -def imread(filename): +def imread(filename, imread=None, preprocess=None): """ Read a stack of images into a dask array Parameters @@ -21,6 +21,13 @@ filename: string A globstring like 'myfile.*.png' + imread: function (optional) + Optionally provide custom imread function. + Function should expect a filename and produce a numpy array. + Defaults to ``skimage.io.imread``. + preprocess: function (optional) + Optionally provide custom function to preprocess the image. + Function should expect a numpy array for a single image. Example ------- @@ -36,17 +43,25 @@ Dask array of all images stacked along the first dimension. All images will be treated as individual chunks """ + imread = imread or sk_imread filenames = sorted(glob(filename)) if not filenames: raise ValueError("No files found under name %s" % filename) name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) - sample = sk_imread(filenames[0]) - - dsk = dict(((name, i) + (0,) * len(sample.shape), - (add_leading_dimension, (sk_imread, filename))) - for i, filename in enumerate(filenames)) + sample = imread(filenames[0]) + if preprocess: + sample = preprocess(sample) + + keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))] + if preprocess: + values = [(add_leading_dimension, (preprocess, (imread, filename))) + for filename in filenames] + else: + values = [(add_leading_dimension, (imread, filename)) + for filename in filenames] + dsk = dict(zip(keys, values)) chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
{"golden_diff": "diff --git a/dask/array/image.py b/dask/array/image.py\n--- a/dask/array/image.py\n+++ b/dask/array/image.py\n@@ -13,7 +13,7 @@\n return x[None, ...]\n \n \n-def imread(filename):\n+def imread(filename, imread=None, preprocess=None):\n \"\"\" Read a stack of images into a dask array\n \n Parameters\n@@ -21,6 +21,13 @@\n \n filename: string\n A globstring like 'myfile.*.png'\n+ imread: function (optional)\n+ Optionally provide custom imread function.\n+ Function should expect a filename and produce a numpy array.\n+ Defaults to ``skimage.io.imread``.\n+ preprocess: function (optional)\n+ Optionally provide custom function to preprocess the image.\n+ Function should expect a numpy array for a single image.\n \n Example\n -------\n@@ -36,17 +43,25 @@\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n+ imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n \n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n \n- sample = sk_imread(filenames[0])\n-\n- dsk = dict(((name, i) + (0,) * len(sample.shape),\n- (add_leading_dimension, (sk_imread, filename)))\n- for i, filename in enumerate(filenames))\n+ sample = imread(filenames[0])\n+ if preprocess:\n+ sample = preprocess(sample)\n+\n+ keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n+ if preprocess:\n+ values = [(add_leading_dimension, (preprocess, (imread, filename)))\n+ for filename in filenames]\n+ else:\n+ values = [(add_leading_dimension, (imread, filename))\n+ for filename in filenames]\n+ dsk = dict(zip(keys, values))\n \n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n", "issue": "Additional improvement for da.image.imread\nI think following 2 improvements make the function better.\n#### 1. Allow custom `imread` function.\n\nSome prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better.\n#### 2. Allow custom preprocessing function.\n\nIn image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility.\n\n", "before_files": [{"content": "from glob import glob\nimport os\n\ntry:\n from skimage.io import imread as sk_imread\nexcept ImportError:\n pass\n\nfrom .core import Array\nfrom ..base import tokenize\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename):\n \"\"\" Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n\n Example\n -------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. 
All images\n will be treated as individual chunks\n \"\"\"\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = sk_imread(filenames[0])\n\n dsk = dict(((name, i) + (0,) * len(sample.shape),\n (add_leading_dimension, (sk_imread, filename)))\n for i, filename in enumerate(filenames))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)\n", "path": "dask/array/image.py"}]}
problem_id: gh_patches_debug_19722
source: rasdani/github-patches
task_type: git_diff
in_source_id: iterative__dvc-7283
prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 'None' does not contain DVC directory ```console cd "$(mktemp -d)" dvc add foo # or any other command ``` </issue> <code> [start of dvc/repo/__init__.py] 1 import logging 2 import os 3 from collections import defaultdict 4 from contextlib import contextmanager 5 from functools import wraps 6 from typing import TYPE_CHECKING, Callable, Optional, Set 7 8 from funcy import cached_property 9 10 from dvc.exceptions import FileMissingError 11 from dvc.exceptions import IsADirectoryError as DvcIsADirectoryError 12 from dvc.exceptions import NotDvcRepoError, OutputNotFoundError 13 from dvc.ignore import DvcIgnoreFilter 14 from dvc.utils import env2bool 15 from dvc.utils.fs import path_isin 16 17 if TYPE_CHECKING: 18 from dvc.fs.base import FileSystem 19 from dvc.objects.file import HashFile 20 from dvc.repo.scm_context import SCMContext 21 22 logger = logging.getLogger(__name__) 23 24 25 @contextmanager 26 def lock_repo(repo: "Repo"): 27 # pylint: disable=protected-access 28 depth = repo._lock_depth 29 repo._lock_depth += 1 30 31 try: 32 if depth > 0: 33 yield 34 else: 35 with repo.lock: 36 repo._reset() 37 yield 38 # Graph cache is no longer valid after we release the repo.lock 39 repo._reset() 40 finally: 41 repo._lock_depth = depth 42 43 44 def locked(f): 45 @wraps(f) 46 def wrapper(repo, *args, **kwargs): 47 with lock_repo(repo): 48 return f(repo, *args, **kwargs) 49 50 return wrapper 51 52 53 class Repo: 54 DVC_DIR = ".dvc" 55 56 from dvc.repo.add import add 57 from dvc.repo.checkout import checkout 58 from dvc.repo.commit import commit 59 from dvc.repo.destroy import destroy 60 from dvc.repo.diff import diff 61 from dvc.repo.fetch import fetch 62 from dvc.repo.freeze import freeze, unfreeze 63 from dvc.repo.gc import gc 64 from dvc.repo.get import get as _get 65 from dvc.repo.get_url import get_url as _get_url 66 from dvc.repo.imp import imp 67 from dvc.repo.imp_url import imp_url 68 from dvc.repo.install import install 69 from dvc.repo.ls import ls as _ls 70 from dvc.repo.move import move 71 from dvc.repo.pull import pull 72 from dvc.repo.push import push 73 from dvc.repo.remove import remove 74 from dvc.repo.reproduce import reproduce 75 from dvc.repo.run import run 76 from dvc.repo.status import status 77 from dvc.repo.update import update 78 79 ls = staticmethod(_ls) 80 get = staticmethod(_get) 81 get_url = staticmethod(_get_url) 82 83 def _get_repo_dirs( 84 self, 85 root_dir: str = None, 86 fs: "FileSystem" = None, 87 uninitialized: bool = False, 88 ): 89 from dvc.scm import SCM, Base, SCMError 90 from dvc.utils.fs import makedirs 91 92 dvc_dir = None 93 tmp_dir = None 94 try: 95 root_dir = self.find_root(root_dir, fs) 96 dvc_dir = os.path.join(root_dir, self.DVC_DIR) 97 tmp_dir = os.path.join(dvc_dir, "tmp") 98 makedirs(tmp_dir, exist_ok=True) 99 except NotDvcRepoError: 100 if not uninitialized: 101 raise 102 103 try: 104 scm = SCM(root_dir or os.curdir) 105 except SCMError: 106 scm = SCM(os.curdir, no_scm=True) 107 108 assert isinstance(scm, Base) 109 root_dir = scm.root_dir 110 111 return root_dir, dvc_dir, tmp_dir 112 113 def _get_database_dir(self, db_name): 114 # NOTE: by default, store SQLite-based remote indexes and state's 115 # `links` and `md5s` caches in the repository itself to avoid any 116 # possible state corruption in 'shared cache dir' scenario, but allow 117 # user to override this through config when, say, the repository is 118 # located on a mounted volume — 
see 119 # https://github.com/iterative/dvc/issues/4420 120 base_db_dir = self.config.get(db_name, {}).get("dir", None) 121 if not base_db_dir: 122 return self.tmp_dir 123 124 import hashlib 125 126 from dvc.utils.fs import makedirs 127 128 root_dir_hash = hashlib.sha224( 129 self.root_dir.encode("utf-8") 130 ).hexdigest() 131 132 db_dir = os.path.join( 133 base_db_dir, 134 self.DVC_DIR, 135 f"{os.path.basename(self.root_dir)}-{root_dir_hash[0:7]}", 136 ) 137 138 makedirs(db_dir, exist_ok=True) 139 return db_dir 140 141 def __init__( 142 self, 143 root_dir=None, 144 fs=None, 145 rev=None, 146 subrepos=False, 147 uninitialized=False, 148 config=None, 149 url=None, 150 repo_factory=None, 151 ): 152 from dvc.config import Config 153 from dvc.data.db import ODBManager 154 from dvc.data_cloud import DataCloud 155 from dvc.fs.git import GitFileSystem 156 from dvc.fs.local import localfs 157 from dvc.lock import LockNoop, make_lock 158 from dvc.repo.live import Live 159 from dvc.repo.metrics import Metrics 160 from dvc.repo.params import Params 161 from dvc.repo.plots import Plots 162 from dvc.repo.stage import StageLoad 163 from dvc.scm import SCM 164 from dvc.stage.cache import StageCache 165 from dvc.state import State, StateNoop 166 167 self.url = url 168 self._fs_conf = {"repo_factory": repo_factory} 169 self._fs = fs or localfs 170 self._scm = None 171 172 if rev and not fs: 173 self._scm = SCM(root_dir or os.curdir) 174 self._fs = GitFileSystem(scm=self._scm, rev=rev) 175 176 self.root_dir, self.dvc_dir, self.tmp_dir = self._get_repo_dirs( 177 root_dir=root_dir, fs=self.fs, uninitialized=uninitialized 178 ) 179 180 self.config = Config(self.dvc_dir, fs=self.fs, config=config) 181 self._uninitialized = uninitialized 182 183 # used by RepoFileSystem to determine if it should traverse subrepos 184 self.subrepos = subrepos 185 186 self.cloud = DataCloud(self) 187 self.stage = StageLoad(self) 188 189 if isinstance(self.fs, GitFileSystem) or not self.dvc_dir: 190 self.lock = LockNoop() 191 self.state = StateNoop() 192 self.odb = ODBManager(self) 193 else: 194 self.lock = make_lock( 195 os.path.join(self.tmp_dir, "lock"), 196 tmp_dir=self.tmp_dir, 197 hardlink_lock=self.config["core"].get("hardlink_lock", False), 198 friendly=True, 199 ) 200 201 state_db_dir = self._get_database_dir("state") 202 self.state = State(self.root_dir, state_db_dir, self.dvcignore) 203 self.odb = ODBManager(self) 204 205 self.stage_cache = StageCache(self) 206 207 self._ignore() 208 209 self.metrics = Metrics(self) 210 self.plots = Plots(self) 211 self.params = Params(self) 212 self.live = Live(self) 213 214 self.stage_collection_error_handler: Optional[ 215 Callable[[str, Exception], None] 216 ] = None 217 self._lock_depth = 0 218 219 def __str__(self): 220 return self.url or self.root_dir 221 222 @cached_property 223 def index(self): 224 from dvc.repo.index import Index 225 226 return Index(self) 227 228 @staticmethod 229 def open(url, *args, **kwargs): 230 if url is None: 231 url = os.getcwd() 232 233 if os.path.exists(url): 234 try: 235 return Repo(url, *args, **kwargs) 236 except NotDvcRepoError: 237 pass # fallthrough to external_repo 238 239 from dvc.external_repo import external_repo 240 241 return external_repo(url, *args, **kwargs) 242 243 @cached_property 244 def scm(self): 245 from dvc.scm import SCM, SCMError 246 247 if self._scm: 248 return self._scm 249 250 no_scm = self.config["core"].get("no_scm", False) 251 try: 252 return SCM(self.root_dir, no_scm=no_scm) 253 except SCMError: 254 if 
self._uninitialized: 255 # might not be a git/dvc repo at all 256 # used in `params/metrics/plots/live` targets 257 return SCM(self.root_dir, no_scm=True) 258 raise 259 260 @cached_property 261 def scm_context(self) -> "SCMContext": 262 from dvc.repo.scm_context import SCMContext 263 264 return SCMContext(self.scm, self.config) 265 266 @cached_property 267 def dvcignore(self) -> DvcIgnoreFilter: 268 269 return DvcIgnoreFilter(self.fs, self.root_dir) 270 271 def get_rev(self): 272 from dvc.fs.local import LocalFileSystem 273 274 assert self.scm 275 if isinstance(self.fs, LocalFileSystem): 276 from dvc.scm import map_scm_exception 277 278 with map_scm_exception(): 279 return self.scm.get_rev() 280 return self.fs.rev 281 282 @cached_property 283 def experiments(self): 284 from dvc.repo.experiments import Experiments 285 286 return Experiments(self) 287 288 @cached_property 289 def machine(self): 290 from dvc.machine import MachineManager 291 292 if self.tmp_dir and ( 293 self.config["feature"].get("machine", False) 294 or env2bool("DVC_TEST") 295 ): 296 return MachineManager(self) 297 return None 298 299 @property 300 def fs(self) -> "FileSystem": 301 return self._fs 302 303 @fs.setter 304 def fs(self, fs: "FileSystem"): 305 self._fs = fs 306 # Our graph cache is no longer valid, as it was based on the previous 307 # fs. 308 self._reset() 309 310 def __repr__(self): 311 return f"{self.__class__.__name__}: '{self.root_dir}'" 312 313 @classmethod 314 def find_root(cls, root=None, fs=None) -> str: 315 from dvc.fs.local import LocalFileSystem, localfs 316 317 root_dir = os.path.realpath(root or os.curdir) 318 _fs = fs 319 fs = fs or localfs 320 321 if not fs.isdir(root_dir): 322 raise NotDvcRepoError(f"directory '{root}' does not exist") 323 324 while True: 325 dvc_dir = fs.path.join(root_dir, cls.DVC_DIR) 326 if fs.isdir(dvc_dir): 327 return root_dir 328 if isinstance(fs, LocalFileSystem) and os.path.ismount(root_dir): 329 break 330 parent = fs.path.parent(root_dir) 331 if parent == root_dir: 332 break 333 root_dir = parent 334 335 if _fs: 336 msg = f"'{root}' does not contain DVC directory" 337 else: 338 msg = ( 339 "you are not inside of a DVC repository " 340 f"(checked up to mount point '{root_dir}')" 341 ) 342 raise NotDvcRepoError(msg) 343 344 @classmethod 345 def find_dvc_dir(cls, root=None): 346 root_dir = cls.find_root(root) 347 return os.path.join(root_dir, cls.DVC_DIR) 348 349 @staticmethod 350 def init(root_dir=os.curdir, no_scm=False, force=False, subdir=False): 351 from dvc.repo.init import init 352 353 return init( 354 root_dir=root_dir, no_scm=no_scm, force=force, subdir=subdir 355 ) 356 357 def unprotect(self, target): 358 return self.odb.local.unprotect(target) 359 360 def _ignore(self): 361 flist = [self.config.files["local"], self.tmp_dir] 362 363 if path_isin(self.odb.local.cache_dir, self.root_dir): 364 flist += [self.odb.local.cache_dir] 365 366 for file in flist: 367 self.scm_context.ignore(file) 368 369 def brancher(self, *args, **kwargs): 370 from dvc.repo.brancher import brancher 371 372 return brancher(self, *args, **kwargs) 373 374 def used_objs( 375 self, 376 targets=None, 377 all_branches=False, 378 with_deps=False, 379 all_tags=False, 380 all_commits=False, 381 all_experiments=False, 382 remote=None, 383 force=False, 384 jobs=None, 385 recursive=False, 386 used_run_cache=None, 387 revs=None, 388 ): 389 """Get the stages related to the given target and collect 390 the `info` of its outputs. 
391 392 This is useful to know what files from the cache are _in use_ 393 (namely, a file described as an output on a stage). 394 395 The scope is, by default, the working directory, but you can use 396 `all_branches`/`all_tags`/`all_commits`/`all_experiments` to expand 397 the scope. 398 399 Returns: 400 A dict mapping (remote) ODB instances to sets of objects that 401 belong to each ODB. If the ODB instance is None, the objects 402 are naive and do not belong to a specific remote ODB. 403 """ 404 used = defaultdict(set) 405 406 def _add_suffix(objs: Set["HashFile"], suffix: str) -> None: 407 from itertools import chain 408 409 from dvc.data import iterobjs 410 411 for obj in chain.from_iterable(map(iterobjs, objs)): 412 if obj.name is not None: 413 obj.name += suffix 414 415 for branch in self.brancher( 416 revs=revs, 417 all_branches=all_branches, 418 all_tags=all_tags, 419 all_commits=all_commits, 420 all_experiments=all_experiments, 421 ): 422 for odb, objs in self.index.used_objs( 423 targets, 424 remote=remote, 425 force=force, 426 jobs=jobs, 427 recursive=recursive, 428 with_deps=with_deps, 429 ).items(): 430 if branch: 431 _add_suffix(objs, f" ({branch})") 432 used[odb].update(objs) 433 434 if used_run_cache: 435 for odb, objs in self.stage_cache.get_used_objs( 436 used_run_cache, remote=remote, force=force, jobs=jobs 437 ).items(): 438 used[odb].update(objs) 439 440 return used 441 442 @property 443 def stages(self): # obsolete, only for backward-compatibility 444 return self.index.stages 445 446 def find_outs_by_path(self, path, outs=None, recursive=False, strict=True): 447 # using `outs_graph` to ensure graph checks are run 448 outs = outs or self.index.outs_graph 449 450 abs_path = os.path.abspath(path) 451 fs_path = abs_path 452 453 def func(out): 454 def eq(one, two): 455 return one == two 456 457 match = eq if strict else out.fs.path.isin_or_eq 458 459 if out.scheme == "local" and match(fs_path, out.fs_path): 460 return True 461 462 if recursive and out.fs.path.isin(out.fs_path, fs_path): 463 return True 464 465 return False 466 467 matched = list(filter(func, outs)) 468 if not matched: 469 raise OutputNotFoundError(path, self) 470 471 return matched 472 473 def is_dvc_internal(self, path): 474 path_parts = os.path.normpath(path).split(os.path.sep) 475 return self.DVC_DIR in path_parts 476 477 @cached_property 478 def dvcfs(self): 479 from dvc.fs.dvc import DvcFileSystem 480 481 return DvcFileSystem(repo=self) 482 483 @cached_property 484 def repo_fs(self): 485 from dvc.fs.repo import RepoFileSystem 486 487 return RepoFileSystem(self, subrepos=self.subrepos, **self._fs_conf) 488 489 @cached_property 490 def index_db_dir(self): 491 return self._get_database_dir("index") 492 493 @contextmanager 494 def open_by_relpath(self, path, remote=None, mode="r", encoding=None): 495 """Opens a specified resource as a file descriptor""" 496 from dvc.fs.repo import RepoFileSystem 497 498 fs = RepoFileSystem(self, subrepos=True) 499 path = self.fs.path.join(self.root_dir, path) 500 try: 501 with fs.open( 502 path, mode=mode, encoding=encoding, remote=remote 503 ) as fobj: 504 yield fobj 505 except FileNotFoundError as exc: 506 raise FileMissingError(path) from exc 507 except IsADirectoryError as exc: 508 raise DvcIsADirectoryError(f"'{path}' is a directory") from exc 509 510 def close(self): 511 self.scm.close() 512 self.state.close() 513 514 def _reset(self): 515 self.state.close() 516 self.scm._reset() # pylint: disable=protected-access 517 self.__dict__.pop("index", None) 518 
self.__dict__.pop("dvcignore", None) 519 520 def __enter__(self): 521 return self 522 523 def __exit__(self, exc_type, exc_val, exc_tb): 524 self._reset() 525 self.scm.close() 526 [end of dvc/repo/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/repo/__init__.py b/dvc/repo/__init__.py --- a/dvc/repo/__init__.py +++ b/dvc/repo/__init__.py @@ -314,8 +314,8 @@ def find_root(cls, root=None, fs=None) -> str: from dvc.fs.local import LocalFileSystem, localfs - root_dir = os.path.realpath(root or os.curdir) - _fs = fs + root = root or os.curdir + root_dir = os.path.realpath(root) fs = fs or localfs if not fs.isdir(root_dir): @@ -332,13 +332,11 @@ break root_dir = parent - if _fs: - msg = f"'{root}' does not contain DVC directory" - else: - msg = ( - "you are not inside of a DVC repository " - f"(checked up to mount point '{root_dir}')" - ) + msg = "you are not inside of a DVC repository" + + if isinstance(fs, LocalFileSystem): + msg = f"{msg} (checked up to mount point '{root_dir}')" + raise NotDvcRepoError(msg) @classmethod
{"golden_diff": "diff --git a/dvc/repo/__init__.py b/dvc/repo/__init__.py\n--- a/dvc/repo/__init__.py\n+++ b/dvc/repo/__init__.py\n@@ -314,8 +314,8 @@\n def find_root(cls, root=None, fs=None) -> str:\n from dvc.fs.local import LocalFileSystem, localfs\n \n- root_dir = os.path.realpath(root or os.curdir)\n- _fs = fs\n+ root = root or os.curdir\n+ root_dir = os.path.realpath(root)\n fs = fs or localfs\n \n if not fs.isdir(root_dir):\n@@ -332,13 +332,11 @@\n break\n root_dir = parent\n \n- if _fs:\n- msg = f\"'{root}' does not contain DVC directory\"\n- else:\n- msg = (\n- \"you are not inside of a DVC repository \"\n- f\"(checked up to mount point '{root_dir}')\"\n- )\n+ msg = \"you are not inside of a DVC repository\"\n+\n+ if isinstance(fs, LocalFileSystem):\n+ msg = f\"{msg} (checked up to mount point '{root_dir}')\"\n+\n raise NotDvcRepoError(msg)\n \n @classmethod\n", "issue": "'None' does not contain DVC directory\n```console\r\ncd \"$(mktemp -d)\"\r\ndvc add foo # or any other command\r\n```\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Callable, Optional, Set\n\nfrom funcy import cached_property\n\nfrom dvc.exceptions import FileMissingError\nfrom dvc.exceptions import IsADirectoryError as DvcIsADirectoryError\nfrom dvc.exceptions import NotDvcRepoError, OutputNotFoundError\nfrom dvc.ignore import DvcIgnoreFilter\nfrom dvc.utils import env2bool\nfrom dvc.utils.fs import path_isin\n\nif TYPE_CHECKING:\n from dvc.fs.base import FileSystem\n from dvc.objects.file import HashFile\n from dvc.repo.scm_context import SCMContext\n\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef lock_repo(repo: \"Repo\"):\n # pylint: disable=protected-access\n depth = repo._lock_depth\n repo._lock_depth += 1\n\n try:\n if depth > 0:\n yield\n else:\n with repo.lock:\n repo._reset()\n yield\n # Graph cache is no longer valid after we release the repo.lock\n repo._reset()\n finally:\n repo._lock_depth = depth\n\n\ndef locked(f):\n @wraps(f)\n def wrapper(repo, *args, **kwargs):\n with lock_repo(repo):\n return f(repo, *args, **kwargs)\n\n return wrapper\n\n\nclass Repo:\n DVC_DIR = \".dvc\"\n\n from dvc.repo.add import add\n from dvc.repo.checkout import checkout\n from dvc.repo.commit import commit\n from dvc.repo.destroy import destroy\n from dvc.repo.diff import diff\n from dvc.repo.fetch import fetch\n from dvc.repo.freeze import freeze, unfreeze\n from dvc.repo.gc import gc\n from dvc.repo.get import get as _get\n from dvc.repo.get_url import get_url as _get_url\n from dvc.repo.imp import imp\n from dvc.repo.imp_url import imp_url\n from dvc.repo.install import install\n from dvc.repo.ls import ls as _ls\n from dvc.repo.move import move\n from dvc.repo.pull import pull\n from dvc.repo.push import push\n from dvc.repo.remove import remove\n from dvc.repo.reproduce import reproduce\n from dvc.repo.run import run\n from dvc.repo.status import status\n from dvc.repo.update import update\n\n ls = staticmethod(_ls)\n get = staticmethod(_get)\n get_url = staticmethod(_get_url)\n\n def _get_repo_dirs(\n self,\n root_dir: str = None,\n fs: \"FileSystem\" = None,\n uninitialized: bool = False,\n ):\n from dvc.scm import SCM, Base, SCMError\n from dvc.utils.fs import makedirs\n\n dvc_dir = None\n tmp_dir = None\n try:\n root_dir = self.find_root(root_dir, fs)\n dvc_dir = os.path.join(root_dir, self.DVC_DIR)\n tmp_dir = os.path.join(dvc_dir, 
\"tmp\")\n makedirs(tmp_dir, exist_ok=True)\n except NotDvcRepoError:\n if not uninitialized:\n raise\n\n try:\n scm = SCM(root_dir or os.curdir)\n except SCMError:\n scm = SCM(os.curdir, no_scm=True)\n\n assert isinstance(scm, Base)\n root_dir = scm.root_dir\n\n return root_dir, dvc_dir, tmp_dir\n\n def _get_database_dir(self, db_name):\n # NOTE: by default, store SQLite-based remote indexes and state's\n # `links` and `md5s` caches in the repository itself to avoid any\n # possible state corruption in 'shared cache dir' scenario, but allow\n # user to override this through config when, say, the repository is\n # located on a mounted volume \u2014 see\n # https://github.com/iterative/dvc/issues/4420\n base_db_dir = self.config.get(db_name, {}).get(\"dir\", None)\n if not base_db_dir:\n return self.tmp_dir\n\n import hashlib\n\n from dvc.utils.fs import makedirs\n\n root_dir_hash = hashlib.sha224(\n self.root_dir.encode(\"utf-8\")\n ).hexdigest()\n\n db_dir = os.path.join(\n base_db_dir,\n self.DVC_DIR,\n f\"{os.path.basename(self.root_dir)}-{root_dir_hash[0:7]}\",\n )\n\n makedirs(db_dir, exist_ok=True)\n return db_dir\n\n def __init__(\n self,\n root_dir=None,\n fs=None,\n rev=None,\n subrepos=False,\n uninitialized=False,\n config=None,\n url=None,\n repo_factory=None,\n ):\n from dvc.config import Config\n from dvc.data.db import ODBManager\n from dvc.data_cloud import DataCloud\n from dvc.fs.git import GitFileSystem\n from dvc.fs.local import localfs\n from dvc.lock import LockNoop, make_lock\n from dvc.repo.live import Live\n from dvc.repo.metrics import Metrics\n from dvc.repo.params import Params\n from dvc.repo.plots import Plots\n from dvc.repo.stage import StageLoad\n from dvc.scm import SCM\n from dvc.stage.cache import StageCache\n from dvc.state import State, StateNoop\n\n self.url = url\n self._fs_conf = {\"repo_factory\": repo_factory}\n self._fs = fs or localfs\n self._scm = None\n\n if rev and not fs:\n self._scm = SCM(root_dir or os.curdir)\n self._fs = GitFileSystem(scm=self._scm, rev=rev)\n\n self.root_dir, self.dvc_dir, self.tmp_dir = self._get_repo_dirs(\n root_dir=root_dir, fs=self.fs, uninitialized=uninitialized\n )\n\n self.config = Config(self.dvc_dir, fs=self.fs, config=config)\n self._uninitialized = uninitialized\n\n # used by RepoFileSystem to determine if it should traverse subrepos\n self.subrepos = subrepos\n\n self.cloud = DataCloud(self)\n self.stage = StageLoad(self)\n\n if isinstance(self.fs, GitFileSystem) or not self.dvc_dir:\n self.lock = LockNoop()\n self.state = StateNoop()\n self.odb = ODBManager(self)\n else:\n self.lock = make_lock(\n os.path.join(self.tmp_dir, \"lock\"),\n tmp_dir=self.tmp_dir,\n hardlink_lock=self.config[\"core\"].get(\"hardlink_lock\", False),\n friendly=True,\n )\n\n state_db_dir = self._get_database_dir(\"state\")\n self.state = State(self.root_dir, state_db_dir, self.dvcignore)\n self.odb = ODBManager(self)\n\n self.stage_cache = StageCache(self)\n\n self._ignore()\n\n self.metrics = Metrics(self)\n self.plots = Plots(self)\n self.params = Params(self)\n self.live = Live(self)\n\n self.stage_collection_error_handler: Optional[\n Callable[[str, Exception], None]\n ] = None\n self._lock_depth = 0\n\n def __str__(self):\n return self.url or self.root_dir\n\n @cached_property\n def index(self):\n from dvc.repo.index import Index\n\n return Index(self)\n\n @staticmethod\n def open(url, *args, **kwargs):\n if url is None:\n url = os.getcwd()\n\n if os.path.exists(url):\n try:\n return Repo(url, *args, **kwargs)\n except 
NotDvcRepoError:\n pass # fallthrough to external_repo\n\n from dvc.external_repo import external_repo\n\n return external_repo(url, *args, **kwargs)\n\n @cached_property\n def scm(self):\n from dvc.scm import SCM, SCMError\n\n if self._scm:\n return self._scm\n\n no_scm = self.config[\"core\"].get(\"no_scm\", False)\n try:\n return SCM(self.root_dir, no_scm=no_scm)\n except SCMError:\n if self._uninitialized:\n # might not be a git/dvc repo at all\n # used in `params/metrics/plots/live` targets\n return SCM(self.root_dir, no_scm=True)\n raise\n\n @cached_property\n def scm_context(self) -> \"SCMContext\":\n from dvc.repo.scm_context import SCMContext\n\n return SCMContext(self.scm, self.config)\n\n @cached_property\n def dvcignore(self) -> DvcIgnoreFilter:\n\n return DvcIgnoreFilter(self.fs, self.root_dir)\n\n def get_rev(self):\n from dvc.fs.local import LocalFileSystem\n\n assert self.scm\n if isinstance(self.fs, LocalFileSystem):\n from dvc.scm import map_scm_exception\n\n with map_scm_exception():\n return self.scm.get_rev()\n return self.fs.rev\n\n @cached_property\n def experiments(self):\n from dvc.repo.experiments import Experiments\n\n return Experiments(self)\n\n @cached_property\n def machine(self):\n from dvc.machine import MachineManager\n\n if self.tmp_dir and (\n self.config[\"feature\"].get(\"machine\", False)\n or env2bool(\"DVC_TEST\")\n ):\n return MachineManager(self)\n return None\n\n @property\n def fs(self) -> \"FileSystem\":\n return self._fs\n\n @fs.setter\n def fs(self, fs: \"FileSystem\"):\n self._fs = fs\n # Our graph cache is no longer valid, as it was based on the previous\n # fs.\n self._reset()\n\n def __repr__(self):\n return f\"{self.__class__.__name__}: '{self.root_dir}'\"\n\n @classmethod\n def find_root(cls, root=None, fs=None) -> str:\n from dvc.fs.local import LocalFileSystem, localfs\n\n root_dir = os.path.realpath(root or os.curdir)\n _fs = fs\n fs = fs or localfs\n\n if not fs.isdir(root_dir):\n raise NotDvcRepoError(f\"directory '{root}' does not exist\")\n\n while True:\n dvc_dir = fs.path.join(root_dir, cls.DVC_DIR)\n if fs.isdir(dvc_dir):\n return root_dir\n if isinstance(fs, LocalFileSystem) and os.path.ismount(root_dir):\n break\n parent = fs.path.parent(root_dir)\n if parent == root_dir:\n break\n root_dir = parent\n\n if _fs:\n msg = f\"'{root}' does not contain DVC directory\"\n else:\n msg = (\n \"you are not inside of a DVC repository \"\n f\"(checked up to mount point '{root_dir}')\"\n )\n raise NotDvcRepoError(msg)\n\n @classmethod\n def find_dvc_dir(cls, root=None):\n root_dir = cls.find_root(root)\n return os.path.join(root_dir, cls.DVC_DIR)\n\n @staticmethod\n def init(root_dir=os.curdir, no_scm=False, force=False, subdir=False):\n from dvc.repo.init import init\n\n return init(\n root_dir=root_dir, no_scm=no_scm, force=force, subdir=subdir\n )\n\n def unprotect(self, target):\n return self.odb.local.unprotect(target)\n\n def _ignore(self):\n flist = [self.config.files[\"local\"], self.tmp_dir]\n\n if path_isin(self.odb.local.cache_dir, self.root_dir):\n flist += [self.odb.local.cache_dir]\n\n for file in flist:\n self.scm_context.ignore(file)\n\n def brancher(self, *args, **kwargs):\n from dvc.repo.brancher import brancher\n\n return brancher(self, *args, **kwargs)\n\n def used_objs(\n self,\n targets=None,\n all_branches=False,\n with_deps=False,\n all_tags=False,\n all_commits=False,\n all_experiments=False,\n remote=None,\n force=False,\n jobs=None,\n recursive=False,\n used_run_cache=None,\n revs=None,\n ):\n \"\"\"Get the 
stages related to the given target and collect\n the `info` of its outputs.\n\n This is useful to know what files from the cache are _in use_\n (namely, a file described as an output on a stage).\n\n The scope is, by default, the working directory, but you can use\n `all_branches`/`all_tags`/`all_commits`/`all_experiments` to expand\n the scope.\n\n Returns:\n A dict mapping (remote) ODB instances to sets of objects that\n belong to each ODB. If the ODB instance is None, the objects\n are naive and do not belong to a specific remote ODB.\n \"\"\"\n used = defaultdict(set)\n\n def _add_suffix(objs: Set[\"HashFile\"], suffix: str) -> None:\n from itertools import chain\n\n from dvc.data import iterobjs\n\n for obj in chain.from_iterable(map(iterobjs, objs)):\n if obj.name is not None:\n obj.name += suffix\n\n for branch in self.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n all_experiments=all_experiments,\n ):\n for odb, objs in self.index.used_objs(\n targets,\n remote=remote,\n force=force,\n jobs=jobs,\n recursive=recursive,\n with_deps=with_deps,\n ).items():\n if branch:\n _add_suffix(objs, f\" ({branch})\")\n used[odb].update(objs)\n\n if used_run_cache:\n for odb, objs in self.stage_cache.get_used_objs(\n used_run_cache, remote=remote, force=force, jobs=jobs\n ).items():\n used[odb].update(objs)\n\n return used\n\n @property\n def stages(self): # obsolete, only for backward-compatibility\n return self.index.stages\n\n def find_outs_by_path(self, path, outs=None, recursive=False, strict=True):\n # using `outs_graph` to ensure graph checks are run\n outs = outs or self.index.outs_graph\n\n abs_path = os.path.abspath(path)\n fs_path = abs_path\n\n def func(out):\n def eq(one, two):\n return one == two\n\n match = eq if strict else out.fs.path.isin_or_eq\n\n if out.scheme == \"local\" and match(fs_path, out.fs_path):\n return True\n\n if recursive and out.fs.path.isin(out.fs_path, fs_path):\n return True\n\n return False\n\n matched = list(filter(func, outs))\n if not matched:\n raise OutputNotFoundError(path, self)\n\n return matched\n\n def is_dvc_internal(self, path):\n path_parts = os.path.normpath(path).split(os.path.sep)\n return self.DVC_DIR in path_parts\n\n @cached_property\n def dvcfs(self):\n from dvc.fs.dvc import DvcFileSystem\n\n return DvcFileSystem(repo=self)\n\n @cached_property\n def repo_fs(self):\n from dvc.fs.repo import RepoFileSystem\n\n return RepoFileSystem(self, subrepos=self.subrepos, **self._fs_conf)\n\n @cached_property\n def index_db_dir(self):\n return self._get_database_dir(\"index\")\n\n @contextmanager\n def open_by_relpath(self, path, remote=None, mode=\"r\", encoding=None):\n \"\"\"Opens a specified resource as a file descriptor\"\"\"\n from dvc.fs.repo import RepoFileSystem\n\n fs = RepoFileSystem(self, subrepos=True)\n path = self.fs.path.join(self.root_dir, path)\n try:\n with fs.open(\n path, mode=mode, encoding=encoding, remote=remote\n ) as fobj:\n yield fobj\n except FileNotFoundError as exc:\n raise FileMissingError(path) from exc\n except IsADirectoryError as exc:\n raise DvcIsADirectoryError(f\"'{path}' is a directory\") from exc\n\n def close(self):\n self.scm.close()\n self.state.close()\n\n def _reset(self):\n self.state.close()\n self.scm._reset() # pylint: disable=protected-access\n self.__dict__.pop(\"index\", None)\n self.__dict__.pop(\"dvcignore\", None)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._reset()\n self.scm.close()\n", 
"path": "dvc/repo/__init__.py"}]}
problem_id: gh_patches_debug_233
source: rasdani/github-patches
task_type: git_diff
in_source_id: learningequality__kolibri-6355
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> tasks got cleared without triggering a 'clear task' action ### Observed behavior Observed that my list of tasks got cleared after initiating a new import ### Expected behavior tasks should not be cleared until explicitly done by the user ### User-facing consequences loss of data: historical context ### Errors and logs none ### Steps to reproduce see notes below ### Context 0.13.0 beta 1 </issue> <code> [start of kolibri/core/content/serializers.py] 1 from django.core.cache import cache 2 from django.db.models import Manager 3 from django.db.models import Sum 4 from django.db.models.query import RawQuerySet 5 from le_utils.constants import content_kinds 6 from rest_framework import serializers 7 8 from kolibri.core.content.models import AssessmentMetaData 9 from kolibri.core.content.models import ChannelMetadata 10 from kolibri.core.content.models import ContentNode 11 from kolibri.core.content.models import File 12 from kolibri.core.content.models import Language 13 from kolibri.core.fields import create_timezonestamp 14 15 16 class DynamicFieldsModelSerializer(serializers.ModelSerializer): 17 def __init__(self, *args, **kwargs): 18 # Instantiate the superclass normally 19 super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs) 20 21 # enable dynamic fields specification! 22 if "request" in self.context and self.context["request"].GET.get( 23 "fields", None 24 ): 25 fields = self.context["request"].GET["fields"].split(",") 26 # Drop any fields that are not specified in the `fields` argument. 27 allowed = set(fields) 28 existing = set(self.fields.keys()) 29 for field_name in existing - allowed: 30 self.fields.pop(field_name) 31 32 33 class ChannelMetadataSerializer(serializers.ModelSerializer): 34 root = serializers.PrimaryKeyRelatedField(read_only=True) 35 lang_code = serializers.SerializerMethodField() 36 lang_name = serializers.SerializerMethodField() 37 available = serializers.SerializerMethodField() 38 num_coach_contents = serializers.IntegerField(source="root.num_coach_contents") 39 40 def get_lang_code(self, instance): 41 if instance.root.lang is None: 42 return None 43 44 return instance.root.lang.lang_code 45 46 def get_lang_name(self, instance): 47 if instance.root.lang is None: 48 return None 49 50 return instance.root.lang.lang_name 51 52 def get_available(self, instance): 53 return instance.root.available 54 55 class Meta: 56 model = ChannelMetadata 57 fields = ( 58 "author", 59 "description", 60 "id", 61 "last_updated", 62 "lang_code", 63 "lang_name", 64 "name", 65 "root", 66 "thumbnail", 67 "version", 68 "available", 69 "num_coach_contents", 70 ) 71 72 73 class PublicChannelSerializer(serializers.ModelSerializer): 74 included_languages = serializers.SerializerMethodField() 75 matching_tokens = serializers.SerializerMethodField("match_tokens") 76 language = serializers.SerializerMethodField() 77 icon_encoding = serializers.SerializerMethodField() 78 last_published = serializers.SerializerMethodField() 79 80 def get_language(self, instance): 81 if instance.root.lang is None: 82 return None 83 84 return instance.root.lang.lang_code 85 86 def get_icon_encoding(self, instance): 87 return instance.thumbnail 88 89 def get_included_languages(self, instance): 90 return list(instance.included_languages.all().values_list("id", flat=True)) 91 92 def get_last_published(self, instance): 93 return ( 94 None 95 if not instance.last_updated 96 else 
create_timezonestamp(instance.last_updated) 97 ) 98 99 def match_tokens(self, channel): 100 return [] 101 102 class Meta: 103 model = ChannelMetadata 104 fields = ( 105 "id", 106 "name", 107 "language", 108 "included_languages", 109 "description", 110 "total_resource_count", 111 "version", 112 "published_size", 113 "last_published", 114 "icon_encoding", 115 "matching_tokens", 116 "public", 117 ) 118 119 120 class LowerCaseField(serializers.CharField): 121 def to_representation(self, obj): 122 return super(LowerCaseField, self).to_representation(obj).lower() 123 124 125 class LanguageSerializer(serializers.ModelSerializer): 126 id = LowerCaseField(max_length=14) 127 lang_code = LowerCaseField(max_length=3) 128 lang_subcode = LowerCaseField(max_length=10) 129 130 class Meta: 131 model = Language 132 fields = ("id", "lang_code", "lang_subcode", "lang_name", "lang_direction") 133 134 135 class FileSerializer(serializers.ModelSerializer): 136 checksum = serializers.CharField(source="local_file_id") 137 storage_url = serializers.SerializerMethodField() 138 download_url = serializers.SerializerMethodField() 139 extension = serializers.SerializerMethodField() 140 file_size = serializers.SerializerMethodField() 141 lang = LanguageSerializer() 142 available = serializers.BooleanField(source="local_file.available") 143 144 def get_storage_url(self, target_node): 145 return target_node.get_storage_url() 146 147 def get_download_url(self, target_node): 148 return target_node.get_download_url() 149 150 def get_extension(self, target_node): 151 return target_node.get_extension() 152 153 def get_file_size(self, target_node): 154 return target_node.get_file_size() 155 156 class Meta: 157 model = File 158 fields = ( 159 "storage_url", 160 "id", 161 "priority", 162 "available", 163 "file_size", 164 "extension", 165 "checksum", 166 "preset", 167 "lang", 168 "supplementary", 169 "thumbnail", 170 "download_url", 171 ) 172 173 174 class AssessmentMetaDataSerializer(serializers.ModelSerializer): 175 176 assessment_item_ids = serializers.JSONField(default="[]") 177 mastery_model = serializers.JSONField(default="{}") 178 179 class Meta: 180 model = AssessmentMetaData 181 fields = ( 182 "assessment_item_ids", 183 "number_of_assessments", 184 "mastery_model", 185 "randomize", 186 "is_manipulable", 187 ) 188 189 190 def get_summary_logs(content_ids, user): 191 from kolibri.core.logger.models import ContentSummaryLog 192 193 if not content_ids: 194 return ContentSummaryLog.objects.none() 195 # get all summary logs for the current user that correspond to the descendant content nodes 196 return ContentSummaryLog.objects.filter(user=user, content_id__in=content_ids) 197 198 199 def get_topic_progress_fraction(topic, user): 200 leaf_ids = ( 201 topic.get_descendants(include_self=False) 202 .order_by() 203 .exclude(kind=content_kinds.TOPIC) 204 .values_list("content_id", flat=True) 205 ) 206 return round( 207 ( 208 get_summary_logs(leaf_ids, user).aggregate(Sum("progress"))["progress__sum"] 209 or 0 210 ) 211 / (len(leaf_ids) or 1), 212 4, 213 ) 214 215 216 def get_content_progress_fraction(content, user): 217 from kolibri.core.logger.models import ContentSummaryLog 218 219 try: 220 # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress 221 overall_progress = ContentSummaryLog.objects.get( 222 user=user, content_id=content.content_id 223 ).progress 224 except ContentSummaryLog.DoesNotExist: 225 return None 226 return round(overall_progress, 4) 227 228 229 def 
get_topic_and_content_progress_fraction(node, user): 230 if node.kind == content_kinds.TOPIC: 231 return get_topic_progress_fraction(node, user) 232 else: 233 return get_content_progress_fraction(node, user) 234 235 236 def get_topic_and_content_progress_fractions(nodes, user): 237 leaf_ids = ( 238 nodes.get_descendants(include_self=True) 239 .order_by() 240 .exclude(available=False) 241 .exclude(kind=content_kinds.TOPIC) 242 .values_list("content_id", flat=True) 243 ) 244 245 leaf_node_logs = get_summary_logs(leaf_ids, user) 246 247 overall_progress = {} 248 249 for log in leaf_node_logs.values("content_id", "progress"): 250 overall_progress[log["content_id"]] = round(log["progress"], 4) 251 252 for node in nodes: 253 if node.kind == content_kinds.TOPIC: 254 topic_leaf_ids = ( 255 node.get_descendants(include_self=True) 256 .order_by() 257 .exclude(available=False) 258 .exclude(kind=content_kinds.TOPIC) 259 .values_list("content_id", flat=True) 260 ) 261 262 overall_progress[node.content_id] = ( 263 round( 264 sum(overall_progress.get(leaf_id, 0) for leaf_id in topic_leaf_ids) 265 / len(topic_leaf_ids), 266 4, 267 ) 268 if topic_leaf_ids 269 else 0.0 270 ) 271 272 return overall_progress 273 274 275 def get_content_progress_fractions(nodes, user): 276 if isinstance(nodes, RawQuerySet) or isinstance(nodes, list): 277 leaf_ids = [datum.content_id for datum in nodes] 278 else: 279 leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list( 280 "content_id", flat=True 281 ) 282 283 summary_logs = get_summary_logs(leaf_ids, user) 284 285 # make a lookup dict for all logs to allow mapping from content_id to current progress 286 overall_progress = { 287 log["content_id"]: round(log["progress"], 4) 288 for log in summary_logs.values("content_id", "progress") 289 } 290 return overall_progress 291 292 293 class ContentNodeListSerializer(serializers.ListSerializer): 294 def to_representation(self, data): 295 296 # Dealing with nested relationships, data can be a Manager, 297 # so, first get a queryset from the Manager if needed 298 data = data.all() if isinstance(data, Manager) else data 299 300 # initialize cache key 301 cache_key = None 302 303 # ensure that we are filtering by the parent only 304 # this allows us to only cache results on the learn page 305 from .api import ContentNodeFilter 306 307 parent_filter_only = set(self.context["request"].GET.keys()).intersection( 308 ContentNodeFilter.Meta.fields 309 ) == set(["parent"]) 310 311 # Cache parent look ups only 312 if parent_filter_only: 313 cache_key = "contentnode_list_{parent}".format( 314 parent=self.context["request"].GET.get("parent") 315 ) 316 317 if cache.get(cache_key): 318 return cache.get(cache_key) 319 320 if not data: 321 return data 322 323 if ( 324 "request" not in self.context 325 or not self.context["request"].user.is_facility_user 326 ): 327 progress_dict = {} 328 else: 329 user = self.context["request"].user 330 # Don't annotate topic progress as too expensive 331 progress_dict = get_content_progress_fractions(data, user) 332 333 result = [] 334 topic_only = True 335 336 # Allow results to be limited after all queryset filtering has occurred 337 if self.limit: 338 data = data[: self.limit] 339 340 for item in data: 341 obj = self.child.to_representation( 342 item, 343 progress_fraction=progress_dict.get(item.content_id), 344 annotate_progress_fraction=False, 345 ) 346 topic_only = topic_only and obj.get("kind") == content_kinds.TOPIC 347 result.append(obj) 348 349 # Only store if all nodes are topics, because we don't 
annotate progress on them 350 # This has the happy side effect of not caching our dynamically calculated 351 # recommendation queries, which might change for the same user over time 352 # because they do not return topics 353 if topic_only and parent_filter_only: 354 cache.set(cache_key, result, 60 * 10) 355 356 return result 357 358 359 class ContentNodeSerializer(DynamicFieldsModelSerializer): 360 parent = serializers.PrimaryKeyRelatedField(read_only=True) 361 files = FileSerializer(many=True, read_only=True) 362 assessmentmetadata = AssessmentMetaDataSerializer( 363 read_only=True, allow_null=True, many=True 364 ) 365 lang = LanguageSerializer() 366 367 class Meta: 368 model = ContentNode 369 fields = ( 370 "id", 371 "assessmentmetadata", 372 "author", 373 "available", 374 "channel_id", 375 "coach_content", 376 "content_id", 377 "description", 378 "files", 379 "kind", 380 "lang", 381 "license_description", 382 "license_name", 383 "license_owner", 384 "num_coach_contents", 385 "parent", 386 "sort_order", 387 "title", 388 ) 389 list_serializer_class = ContentNodeListSerializer 390 391 def __new__(cls, *args, **kwargs): 392 # This is overwritten to provide a ListClassSerializer for many=True 393 limit = kwargs.pop("limit", None) 394 new = super(ContentNodeSerializer, cls).__new__(cls, *args, **kwargs) 395 new.limit = limit 396 return new 397 398 def to_representation( 399 self, instance, progress_fraction=None, annotate_progress_fraction=True 400 ): 401 if progress_fraction is None and annotate_progress_fraction: 402 if ( 403 "request" not in self.context 404 or not self.context["request"].user.is_facility_user 405 ): 406 # Don't try to annotate for a non facility user 407 progress_fraction = 0.0 408 else: 409 user = self.context["request"].user 410 if instance.kind != content_kinds.TOPIC: 411 progress_fraction = get_content_progress_fraction(instance, user) 412 value = super(ContentNodeSerializer, self).to_representation(instance) 413 value["progress_fraction"] = progress_fraction 414 return value 415 416 417 class ContentNodeGranularSerializer(serializers.ModelSerializer): 418 num_coach_contents = serializers.SerializerMethodField() 419 coach_content = serializers.SerializerMethodField() 420 total_resources = serializers.SerializerMethodField() 421 importable = serializers.SerializerMethodField() 422 423 class Meta: 424 model = ContentNode 425 fields = ( 426 "id", 427 "available", 428 "coach_content", 429 "importable", 430 "kind", 431 "num_coach_contents", 432 "on_device_resources", 433 "title", 434 "total_resources", 435 ) 436 437 @property 438 def channel_stats(self): 439 return self.context["channel_stats"] 440 441 def get_total_resources(self, instance): 442 # channel_stats is None for export 443 if self.channel_stats is None: 444 return instance.on_device_resources 445 return self.channel_stats.get(instance.id, {"total_resources": 0})[ 446 "total_resources" 447 ] 448 449 def get_num_coach_contents(self, instance): 450 # If for exporting, only show what is available on server. For importing, 451 # show all of the coach contents in the topic. 452 if self.channel_stats is None: 453 return instance.num_coach_contents 454 return self.channel_stats.get(instance.id, {"num_coach_contents": 0})[ 455 "num_coach_contents" 456 ] 457 458 def get_coach_content(self, instance): 459 # If for exporting, only show what is on server. For importing, 460 # show all of the coach contents in the topic. 
461 if self.channel_stats is None: 462 return instance.coach_content 463 return self.channel_stats.get(instance.id, {"coach_content": False})[ 464 "coach_content" 465 ] 466 467 def get_importable(self, instance): 468 # If for export, just return None 469 if self.channel_stats is None: 470 return None 471 return instance.id in self.channel_stats 472 473 474 class ContentNodeProgressListSerializer(serializers.ListSerializer): 475 def to_representation(self, data): 476 477 if not data: 478 return data 479 480 if ( 481 "request" not in self.context 482 or not self.context["request"].user.is_facility_user 483 ): 484 progress_dict = {} 485 else: 486 user = self.context["request"].user 487 # Don't annotate topic progress as too expensive 488 progress_dict = get_topic_and_content_progress_fractions(data, user) 489 490 # Dealing with nested relationships, data can be a Manager, 491 # so, first get a queryset from the Manager if needed 492 iterable = data.all() if isinstance(data, Manager) else data 493 494 return [ 495 self.child.to_representation( 496 item, 497 progress_fraction=progress_dict.get(item.content_id, 0.0), 498 annotate_progress_fraction=False, 499 ) 500 for item in iterable 501 ] 502 503 504 class ContentNodeProgressSerializer(serializers.Serializer): 505 def to_representation( 506 self, instance, progress_fraction=None, annotate_progress_fraction=True 507 ): 508 if progress_fraction is None and annotate_progress_fraction: 509 if ( 510 "request" not in self.context 511 or not self.context["request"].user.is_facility_user 512 ): 513 # Don't try to annotate for a non facility user 514 progress_fraction = 0 515 else: 516 user = self.context["request"].user 517 progress_fraction = ( 518 get_topic_and_content_progress_fraction(instance, user) or 0.0 519 ) 520 return {"id": instance.id, "progress_fraction": progress_fraction} 521 522 class Meta: 523 list_serializer_class = ContentNodeProgressListSerializer 524 [end of kolibri/core/content/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/core/content/serializers.py b/kolibri/core/content/serializers.py --- a/kolibri/core/content/serializers.py +++ b/kolibri/core/content/serializers.py @@ -67,6 +67,7 @@ "version", "available", "num_coach_contents", + "public", )
{"golden_diff": "diff --git a/kolibri/core/content/serializers.py b/kolibri/core/content/serializers.py\n--- a/kolibri/core/content/serializers.py\n+++ b/kolibri/core/content/serializers.py\n@@ -67,6 +67,7 @@\n \"version\",\n \"available\",\n \"num_coach_contents\",\n+ \"public\",\n )\n", "issue": "tasks got cleared without triggering a 'clear task' action\n### Observed behavior\r\n\r\nObserved that my list of tasks got cleared after initiating a new import\r\n\r\n### Expected behavior\r\n\r\ntasks should not be cleared until explicitly done by the user\r\n\r\n### User-facing consequences\r\n\r\nloss of data: historical context\r\n\r\n### Errors and logs\r\n\r\nnone\r\n\r\n### Steps to reproduce\r\n\r\nsee notes below\r\n\r\n### Context\r\n\r\n0.13.0 beta 1\n", "before_files": [{"content": "from django.core.cache import cache\nfrom django.db.models import Manager\nfrom django.db.models import Sum\nfrom django.db.models.query import RawQuerySet\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\nfrom kolibri.core.content.models import AssessmentMetaData\nfrom kolibri.core.content.models import ChannelMetadata\nfrom kolibri.core.content.models import ContentNode\nfrom kolibri.core.content.models import File\nfrom kolibri.core.content.models import Language\nfrom kolibri.core.fields import create_timezonestamp\n\n\nclass DynamicFieldsModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n # Instantiate the superclass normally\n super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)\n\n # enable dynamic fields specification!\n if \"request\" in self.context and self.context[\"request\"].GET.get(\n \"fields\", None\n ):\n fields = self.context[\"request\"].GET[\"fields\"].split(\",\")\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n\nclass ChannelMetadataSerializer(serializers.ModelSerializer):\n root = serializers.PrimaryKeyRelatedField(read_only=True)\n lang_code = serializers.SerializerMethodField()\n lang_name = serializers.SerializerMethodField()\n available = serializers.SerializerMethodField()\n num_coach_contents = serializers.IntegerField(source=\"root.num_coach_contents\")\n\n def get_lang_code(self, instance):\n if instance.root.lang is None:\n return None\n\n return instance.root.lang.lang_code\n\n def get_lang_name(self, instance):\n if instance.root.lang is None:\n return None\n\n return instance.root.lang.lang_name\n\n def get_available(self, instance):\n return instance.root.available\n\n class Meta:\n model = ChannelMetadata\n fields = (\n \"author\",\n \"description\",\n \"id\",\n \"last_updated\",\n \"lang_code\",\n \"lang_name\",\n \"name\",\n \"root\",\n \"thumbnail\",\n \"version\",\n \"available\",\n \"num_coach_contents\",\n )\n\n\nclass PublicChannelSerializer(serializers.ModelSerializer):\n included_languages = serializers.SerializerMethodField()\n matching_tokens = serializers.SerializerMethodField(\"match_tokens\")\n language = serializers.SerializerMethodField()\n icon_encoding = serializers.SerializerMethodField()\n last_published = serializers.SerializerMethodField()\n\n def get_language(self, instance):\n if instance.root.lang is None:\n return None\n\n return instance.root.lang.lang_code\n\n def get_icon_encoding(self, instance):\n return instance.thumbnail\n\n def get_included_languages(self, instance):\n return 
list(instance.included_languages.all().values_list(\"id\", flat=True))\n\n def get_last_published(self, instance):\n return (\n None\n if not instance.last_updated\n else create_timezonestamp(instance.last_updated)\n )\n\n def match_tokens(self, channel):\n return []\n\n class Meta:\n model = ChannelMetadata\n fields = (\n \"id\",\n \"name\",\n \"language\",\n \"included_languages\",\n \"description\",\n \"total_resource_count\",\n \"version\",\n \"published_size\",\n \"last_published\",\n \"icon_encoding\",\n \"matching_tokens\",\n \"public\",\n )\n\n\nclass LowerCaseField(serializers.CharField):\n def to_representation(self, obj):\n return super(LowerCaseField, self).to_representation(obj).lower()\n\n\nclass LanguageSerializer(serializers.ModelSerializer):\n id = LowerCaseField(max_length=14)\n lang_code = LowerCaseField(max_length=3)\n lang_subcode = LowerCaseField(max_length=10)\n\n class Meta:\n model = Language\n fields = (\"id\", \"lang_code\", \"lang_subcode\", \"lang_name\", \"lang_direction\")\n\n\nclass FileSerializer(serializers.ModelSerializer):\n checksum = serializers.CharField(source=\"local_file_id\")\n storage_url = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n extension = serializers.SerializerMethodField()\n file_size = serializers.SerializerMethodField()\n lang = LanguageSerializer()\n available = serializers.BooleanField(source=\"local_file.available\")\n\n def get_storage_url(self, target_node):\n return target_node.get_storage_url()\n\n def get_download_url(self, target_node):\n return target_node.get_download_url()\n\n def get_extension(self, target_node):\n return target_node.get_extension()\n\n def get_file_size(self, target_node):\n return target_node.get_file_size()\n\n class Meta:\n model = File\n fields = (\n \"storage_url\",\n \"id\",\n \"priority\",\n \"available\",\n \"file_size\",\n \"extension\",\n \"checksum\",\n \"preset\",\n \"lang\",\n \"supplementary\",\n \"thumbnail\",\n \"download_url\",\n )\n\n\nclass AssessmentMetaDataSerializer(serializers.ModelSerializer):\n\n assessment_item_ids = serializers.JSONField(default=\"[]\")\n mastery_model = serializers.JSONField(default=\"{}\")\n\n class Meta:\n model = AssessmentMetaData\n fields = (\n \"assessment_item_ids\",\n \"number_of_assessments\",\n \"mastery_model\",\n \"randomize\",\n \"is_manipulable\",\n )\n\n\ndef get_summary_logs(content_ids, user):\n from kolibri.core.logger.models import ContentSummaryLog\n\n if not content_ids:\n return ContentSummaryLog.objects.none()\n # get all summary logs for the current user that correspond to the descendant content nodes\n return ContentSummaryLog.objects.filter(user=user, content_id__in=content_ids)\n\n\ndef get_topic_progress_fraction(topic, user):\n leaf_ids = (\n topic.get_descendants(include_self=False)\n .order_by()\n .exclude(kind=content_kinds.TOPIC)\n .values_list(\"content_id\", flat=True)\n )\n return round(\n (\n get_summary_logs(leaf_ids, user).aggregate(Sum(\"progress\"))[\"progress__sum\"]\n or 0\n )\n / (len(leaf_ids) or 1),\n 4,\n )\n\n\ndef get_content_progress_fraction(content, user):\n from kolibri.core.logger.models import ContentSummaryLog\n\n try:\n # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress\n overall_progress = ContentSummaryLog.objects.get(\n user=user, content_id=content.content_id\n ).progress\n except ContentSummaryLog.DoesNotExist:\n return None\n return round(overall_progress, 4)\n\n\ndef 
get_topic_and_content_progress_fraction(node, user):\n if node.kind == content_kinds.TOPIC:\n return get_topic_progress_fraction(node, user)\n else:\n return get_content_progress_fraction(node, user)\n\n\ndef get_topic_and_content_progress_fractions(nodes, user):\n leaf_ids = (\n nodes.get_descendants(include_self=True)\n .order_by()\n .exclude(available=False)\n .exclude(kind=content_kinds.TOPIC)\n .values_list(\"content_id\", flat=True)\n )\n\n leaf_node_logs = get_summary_logs(leaf_ids, user)\n\n overall_progress = {}\n\n for log in leaf_node_logs.values(\"content_id\", \"progress\"):\n overall_progress[log[\"content_id\"]] = round(log[\"progress\"], 4)\n\n for node in nodes:\n if node.kind == content_kinds.TOPIC:\n topic_leaf_ids = (\n node.get_descendants(include_self=True)\n .order_by()\n .exclude(available=False)\n .exclude(kind=content_kinds.TOPIC)\n .values_list(\"content_id\", flat=True)\n )\n\n overall_progress[node.content_id] = (\n round(\n sum(overall_progress.get(leaf_id, 0) for leaf_id in topic_leaf_ids)\n / len(topic_leaf_ids),\n 4,\n )\n if topic_leaf_ids\n else 0.0\n )\n\n return overall_progress\n\n\ndef get_content_progress_fractions(nodes, user):\n if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list(\n \"content_id\", flat=True\n )\n\n summary_logs = get_summary_logs(leaf_ids, user)\n\n # make a lookup dict for all logs to allow mapping from content_id to current progress\n overall_progress = {\n log[\"content_id\"]: round(log[\"progress\"], 4)\n for log in summary_logs.values(\"content_id\", \"progress\")\n }\n return overall_progress\n\n\nclass ContentNodeListSerializer(serializers.ListSerializer):\n def to_representation(self, data):\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n data = data.all() if isinstance(data, Manager) else data\n\n # initialize cache key\n cache_key = None\n\n # ensure that we are filtering by the parent only\n # this allows us to only cache results on the learn page\n from .api import ContentNodeFilter\n\n parent_filter_only = set(self.context[\"request\"].GET.keys()).intersection(\n ContentNodeFilter.Meta.fields\n ) == set([\"parent\"])\n\n # Cache parent look ups only\n if parent_filter_only:\n cache_key = \"contentnode_list_{parent}\".format(\n parent=self.context[\"request\"].GET.get(\"parent\")\n )\n\n if cache.get(cache_key):\n return cache.get(cache_key)\n\n if not data:\n return data\n\n if (\n \"request\" not in self.context\n or not self.context[\"request\"].user.is_facility_user\n ):\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n # Don't annotate topic progress as too expensive\n progress_dict = get_content_progress_fractions(data, user)\n\n result = []\n topic_only = True\n\n # Allow results to be limited after all queryset filtering has occurred\n if self.limit:\n data = data[: self.limit]\n\n for item in data:\n obj = self.child.to_representation(\n item,\n progress_fraction=progress_dict.get(item.content_id),\n annotate_progress_fraction=False,\n )\n topic_only = topic_only and obj.get(\"kind\") == content_kinds.TOPIC\n result.append(obj)\n\n # Only store if all nodes are topics, because we don't annotate progress on them\n # This has the happy side effect of not caching our dynamically calculated\n # recommendation queries, which might change for the same user over time\n # because they do not 
return topics\n if topic_only and parent_filter_only:\n cache.set(cache_key, result, 60 * 10)\n\n return result\n\n\nclass ContentNodeSerializer(DynamicFieldsModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n files = FileSerializer(many=True, read_only=True)\n assessmentmetadata = AssessmentMetaDataSerializer(\n read_only=True, allow_null=True, many=True\n )\n lang = LanguageSerializer()\n\n class Meta:\n model = ContentNode\n fields = (\n \"id\",\n \"assessmentmetadata\",\n \"author\",\n \"available\",\n \"channel_id\",\n \"coach_content\",\n \"content_id\",\n \"description\",\n \"files\",\n \"kind\",\n \"lang\",\n \"license_description\",\n \"license_name\",\n \"license_owner\",\n \"num_coach_contents\",\n \"parent\",\n \"sort_order\",\n \"title\",\n )\n list_serializer_class = ContentNodeListSerializer\n\n def __new__(cls, *args, **kwargs):\n # This is overwritten to provide a ListClassSerializer for many=True\n limit = kwargs.pop(\"limit\", None)\n new = super(ContentNodeSerializer, cls).__new__(cls, *args, **kwargs)\n new.limit = limit\n return new\n\n def to_representation(\n self, instance, progress_fraction=None, annotate_progress_fraction=True\n ):\n if progress_fraction is None and annotate_progress_fraction:\n if (\n \"request\" not in self.context\n or not self.context[\"request\"].user.is_facility_user\n ):\n # Don't try to annotate for a non facility user\n progress_fraction = 0.0\n else:\n user = self.context[\"request\"].user\n if instance.kind != content_kinds.TOPIC:\n progress_fraction = get_content_progress_fraction(instance, user)\n value = super(ContentNodeSerializer, self).to_representation(instance)\n value[\"progress_fraction\"] = progress_fraction\n return value\n\n\nclass ContentNodeGranularSerializer(serializers.ModelSerializer):\n num_coach_contents = serializers.SerializerMethodField()\n coach_content = serializers.SerializerMethodField()\n total_resources = serializers.SerializerMethodField()\n importable = serializers.SerializerMethodField()\n\n class Meta:\n model = ContentNode\n fields = (\n \"id\",\n \"available\",\n \"coach_content\",\n \"importable\",\n \"kind\",\n \"num_coach_contents\",\n \"on_device_resources\",\n \"title\",\n \"total_resources\",\n )\n\n @property\n def channel_stats(self):\n return self.context[\"channel_stats\"]\n\n def get_total_resources(self, instance):\n # channel_stats is None for export\n if self.channel_stats is None:\n return instance.on_device_resources\n return self.channel_stats.get(instance.id, {\"total_resources\": 0})[\n \"total_resources\"\n ]\n\n def get_num_coach_contents(self, instance):\n # If for exporting, only show what is available on server. For importing,\n # show all of the coach contents in the topic.\n if self.channel_stats is None:\n return instance.num_coach_contents\n return self.channel_stats.get(instance.id, {\"num_coach_contents\": 0})[\n \"num_coach_contents\"\n ]\n\n def get_coach_content(self, instance):\n # If for exporting, only show what is on server. 
For importing,\n # show all of the coach contents in the topic.\n if self.channel_stats is None:\n return instance.coach_content\n return self.channel_stats.get(instance.id, {\"coach_content\": False})[\n \"coach_content\"\n ]\n\n def get_importable(self, instance):\n # If for export, just return None\n if self.channel_stats is None:\n return None\n return instance.id in self.channel_stats\n\n\nclass ContentNodeProgressListSerializer(serializers.ListSerializer):\n def to_representation(self, data):\n\n if not data:\n return data\n\n if (\n \"request\" not in self.context\n or not self.context[\"request\"].user.is_facility_user\n ):\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n # Don't annotate topic progress as too expensive\n progress_dict = get_topic_and_content_progress_fractions(data, user)\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data.all() if isinstance(data, Manager) else data\n\n return [\n self.child.to_representation(\n item,\n progress_fraction=progress_dict.get(item.content_id, 0.0),\n annotate_progress_fraction=False,\n )\n for item in iterable\n ]\n\n\nclass ContentNodeProgressSerializer(serializers.Serializer):\n def to_representation(\n self, instance, progress_fraction=None, annotate_progress_fraction=True\n ):\n if progress_fraction is None and annotate_progress_fraction:\n if (\n \"request\" not in self.context\n or not self.context[\"request\"].user.is_facility_user\n ):\n # Don't try to annotate for a non facility user\n progress_fraction = 0\n else:\n user = self.context[\"request\"].user\n progress_fraction = (\n get_topic_and_content_progress_fraction(instance, user) or 0.0\n )\n return {\"id\": instance.id, \"progress_fraction\": progress_fraction}\n\n class Meta:\n list_serializer_class = ContentNodeProgressListSerializer\n", "path": "kolibri/core/content/serializers.py"}]}
problem_id: gh_patches_debug_65930
source: rasdani/github-patches
task_type: git_diff
in_source_id: iterative__dvc-2882
"You will be provided with a partial code base and an issue statement explaining a problem to resolv(...TRUNCATED)
"diff --git a/dvc/remote/base.py b/dvc/remote/base.py\n--- a/dvc/remote/base.py\n+++ b/dvc/remote/ba(...TRUNCATED)
"{\"golden_diff\": \"diff --git a/dvc/remote/base.py b/dvc/remote/base.py\\n--- a/dvc/remote/base.py(...TRUNCATED)
problem_id: gh_patches_debug_60514
source: rasdani/github-patches
task_type: git_diff
in_source_id: kedro-org__kedro-1706
"You will be provided with a partial code base and an issue statement explaining a problem to resolv(...TRUNCATED)
"diff --git a/features/steps/cli_steps.py b/features/steps/cli_steps.py\n--- a/features/steps/cli_st(...TRUNCATED)
"{\"golden_diff\": \"diff --git a/features/steps/cli_steps.py b/features/steps/cli_steps.py\\n--- a/(...TRUNCATED)
problem_id: gh_patches_debug_41327
source: rasdani/github-patches
task_type: git_diff
in_source_id: ibis-project__ibis-1988
"You will be provided with a partial code base and an issue statement explaining a problem to resolv(...TRUNCATED)
"diff --git a/ibis/pyspark/compiler.py b/ibis/pyspark/compiler.py\n--- a/ibis/pyspark/compiler.py\n+(...TRUNCATED)
"{\"golden_diff\": \"diff --git a/ibis/pyspark/compiler.py b/ibis/pyspark/compiler.py\\n--- a/ibis/p(...TRUNCATED)
problem_id: gh_patches_debug_3876
source: rasdani/github-patches
task_type: git_diff
in_source_id: xorbitsai__inference-299
"You will be provided with a partial code base and an issue statement explaining a problem to resolv(...TRUNCATED)
"diff --git a/examples/gradio_chatinterface.py b/examples/gradio_chatinterface.py\n--- a/examples/gr(...TRUNCATED)
"{\"golden_diff\": \"diff --git a/examples/gradio_chatinterface.py b/examples/gradio_chatinterface.p(...TRUNCATED)
problem_id: gh_patches_debug_57507
source: rasdani/github-patches
task_type: git_diff
in_source_id: scikit-image__scikit-image-7095
"You will be provided with a partial code base and an issue statement explaining a problem to resolv(...TRUNCATED)
"diff --git a/skimage/morphology/_skeletonize.py b/skimage/morphology/_skeletonize.py\n--- a/skimage(...TRUNCATED)
"{\"golden_diff\": \"diff --git a/skimage/morphology/_skeletonize.py b/skimage/morphology/_skeletoni(...TRUNCATED)
problem_id: gh_patches_debug_29709
source: rasdani/github-patches
task_type: git_diff
in_source_id: opsdroid__opsdroid-1306
"You will be provided with a partial code base and an issue statement explaining a problem to resolv(...TRUNCATED)
"diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py\n--- a/ops(...TRUNCATED)
"{\"golden_diff\": \"diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__(...TRUNCATED)