python_code stringlengths 0 34.9k |
|---|
from abc import ABC, abstractmethod
from typing import Optional, Dict, Any, List, Tuple, NamedTuple
from dpu_utils.ptutils import BaseComponent
from mlcomponents.embeddings import SequenceEmbedder
class SeqDecoder(BaseComponent, ABC):
def __init__(self, name: str, token_encoder: SequenceEmbedder,
... |
from .seqdecoder import SeqDecoder
from .grudecoder import GruDecoder
from .grucopyingdecoder import GruCopyingDecoder
from .luongattention import LuongAttention

# Public API of this package.  `__all__` must contain *strings* (names), not
# the class objects themselves, for `from package import *` and static tooling
# to work correctly.
__all__ = ['SeqDecoder', 'GruDecoder', 'GruCopyingDecoder', 'LuongAttention']
from typing import Optional, Dict, Any, NamedTuple, List, Tuple
import numpy as np
import torch
from dpu_utils.mlutils import Vocabulary
import torch
from torch import nn
from data.spanutils import get_copyable_spans
from mlcomponents.embeddings import TokenSequenceEmbedder
from mlcomponents.seqdecoding import SeqDec... |
import heapq
from collections import defaultdict
from typing import Dict, Any, Optional, List, Tuple, NamedTuple
import numpy as np
import torch
from dpu_utils.mlutils import Vocabulary
from torch import nn
from mlcomponents.embeddings import TokenSequenceEmbedder
from . import SeqDecoder
from .luongattention import ... |
from typing import Dict, Any, Optional, List, Tuple
import torch
from torch import nn
from mlcomponents.embeddings import SequenceEmbedder
from . import SeqDecoder
from .luongattention import LuongAttention
class GruDecoder(SeqDecoder):
def __init__(self, name: str, token_encoder: SequenceEmbedder,
... |
from collections import Counter
from difflib import SequenceMatcher
from typing import List, NamedTuple, Set, Tuple, Dict
class EditEvaluator:
"""Evaluate a (code) editing model."""
def __init__(self):
self.__num_samples = 0 # type: int
self.__sum_exact_matches = 0 # type: int
# Do... |
from typing import Iterable, Callable
from dpu_utils.utils import RichPath
from data import fcedataloader as fcedataloader, codadataloader as codedataloader, \
wikieditsloader as wikiatomiceditsloader, paraphraseloader
from data.edits import Edit
from data.jsonldata import parse_jsonl_edit_data, parse_monolingual... |
from typing import Iterator
from dpu_utils.utils import RichPath
from data.edits import Edit, EditContext, CONTEXT_SEPERATOR
def load_data_from(file: RichPath) -> Iterator[Edit]:
data = file.read_by_file_suffix()
for line in data:
yield Edit(
input_sequence=line['PrevCodeChunkTokens'],
... |
import logging
from typing import Iterator, List, Tuple, NamedTuple
from dpu_utils.utils import RichPath
from data.edits import Edit
def load_data_from(file: RichPath) -> Iterator[Edit]:
num_excluded_samples = 0
with open(file.to_local_path().path) as f:
for i, row in enumerate(f):
edit_... |
#!/usr/bin/env python
"""
Usage:
monolingualprocess.py bert-tokenize [options] INPUT_DATA OUTPUT_DATA_PATH
monolingualprocess.py bert-tokenize multiple [options] INPUT_DATA_LIST OUTPUT_DATA_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure s... |
import gzip
import logging
from typing import Optional, Iterator, List
from dpu_utils.utils import RichPath
from data.edits import Edit
def clean_up_sentence(tokens: List[str]) -> List[str]:
    """Strip surrounding whitespace from each token and drop tokens that are empty afterwards."""
    cleaned = []
    for token in tokens:
        stripped = token.strip()
        if stripped:
            cleaned.append(stripped)
    return cleaned
def load_data_from(file: RichPath, max... |
import json
import os
from typing import List, Callable, TypeVar, Generic, Optional
import numpy as np
from annoy import AnnoyIndex
from sklearn.manifold import TSNE
from data.representationviz import RepresentationsVisualizer
T = TypeVar('T')
class NLRepresentationsVisualizer(RepresentationsVisualizer):
def __i... |
import random
from typing import Iterator, TypeVar, Iterable, Callable
class LazyDataIterable(Iterable):
    """Iterable wrapper around a zero-argument factory that builds a fresh iterator per pass."""

    def __init__(self, base_iterable_func: Callable[[], Iterator]):
        # Keep the factory itself (not an iterator) so the data can be
        # re-iterated any number of times.
        self.__make_iterator = base_iterable_func

    def __iter__(self):
        # Every iteration request produces a brand-new iterator.
        return self.__make_iterator()
|
"""
Code from https://github.com/kilink/ghdiff
"""
import difflib
import six
import html
def escape(text):
    """Return *text* with HTML-special characters escaped.

    Thin wrapper over :func:`html.escape`, which replaces ``&``, ``<``, ``>``
    and (by default) quote characters with their HTML entities.
    """
    return html.escape(text)
def diff(a, b, n=4):
if isinstance(a, six.string_types):
a = a.splitlines()
if isinstance(b, six.string_types):
b = b.splitlines()
return colorize(list(d... |
from typing import Iterator, Dict, Union, List
from collections import Counter
import numpy as np
from dpu_utils.utils import RichPath
from data.edits import Edit, NLEdit
def parse_jsonl_edit_data(path: RichPath) -> Iterator[Edit]:
for line in path.read_as_jsonl():
yield Edit(
input_sequence... |
import difflib
from enum import Enum
from typing import NamedTuple, TypeVar, Optional, List, Dict
import enum
class Edit(NamedTuple):
    """A single editing sample: the original token sequence, the edited
    output sequence, where the sample came from, and its edit-type labels."""
    input_sequence: List[str]
    output_sequence: List[str]
    provenance: str
    edit_type: List[str]
NLEdit = NamedTuple('NLEdit', [
('input_sequenc... |
import logging
from typing import Optional, Iterator, List
from dpu_utils.utils import RichPath
from data.edits import Edit
def clean_up_sentence(tokens: List[str]) -> List[str]:
    """Normalize a token sequence: trim whitespace and omit tokens left empty."""
    trimmed = (token.strip() for token in tokens)
    return [token for token in trimmed if token]
def load_data_from(file: RichPath, max_size_to_loa... |
#!/usr/bin/env python
"""
Usage:
convertcnndmgraphs.py INPUTS_JSONL SUMMARIES_JSONL OUTPUT_DATA_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
-h --help Show this screen.
--debug Enable deb... |
from typing import List
import numpy as np
def get_copyable_spans(input: List[str], output: List[str]) -> np.ndarray:
"""
Return a 3D tensor copy_mask[k, i, j] that for a given location k shows the all the possible
spans that can be copied.
All valid start locations can be obtained at point k b... |
#!/usr/bin/env python
"""
Usage:
paralleltoedit.py BEFORE AFTER OUTPUT_DATA_PATH
Options:
--azure-info=<path> Azure authentication information file (JSON). Used to load data from Azure storage.
-h --help Show this screen.
--debug Enable debug routines. [defaul... |
import json
import os
from typing import List, Callable, TypeVar, Generic, Optional
import numpy as np
from annoy import AnnoyIndex
from sklearn.manifold import TSNE
T = TypeVar('T')
class RepresentationsVisualizer(Generic[T]):
def __init__(self, labeler: Callable[[T], str], colorer: Callable[[T], str]=None, dis... |
from typing import Iterator, List, Tuple
from dpu_utils.utils import RichPath
from data.edits import Edit
def apply_edits(original_sentence: List[str], edits: List[Tuple[int, int, List[str]]]) -> List[str]:
edited_sentence = []
last_edit_idx = 0
for from_idx, to_idx, edit in edits:
edited_senten... |
from typing import List
import numpy as np
from data.edits import Edit
# Alphabet for synthetic sequences: uppercase 'A'-'Z' followed by lowercase 'a'-'z'.
all_chars = [chr(code) for code in range(ord('A'), ord('Z') + 1)] + \
            [chr(code) for code in range(ord('a'), ord('z') + 1)]
def create_random_sequences(min_size: int, max_size: int, num_sequences_per_size: int):
for seq_size in range(min_size, max_size):
all_input_seqs = set()
... |
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "<option2>")
option_b = os.getenv('OPTION_B', "<option1>")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not h... |
plt.imshow(recognisedimage['original'], interpolation='nearest', cmap=plt.cm.Greys_r)
plt.show()
recognisedimage = min(trainimages[:x], key=lambda e: sum((e['singular']-testimage['singular'])**2))
from scipy import misc
trainimages = []
for i in range(x):
A = misc.imread(str(i) + '.png', flatten=True)
B,... |
import scipy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals._pilutil import imread
import os
os.chdir('data/images_part1')
trainimage = []
for i in range(11):
A = imread(str(i) + '.png', flatten = True)
B, c, D = np.linalg.svd(A)
trainimage.append({'original': A, 'singular': c[:1... |
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.metrics import accuracy_score
import os
os.chdir("data")
seed = 1234
#get data
forestation= pd.read_csv('forestation.csv')
forestation
... |
from pylab import *
x = [1,2,3,4,5,6,7,8,9,10,11]
y = [11,12,25,21,31,40,48,55,54,60,61]
scatter (x,y)
(m,c)=polyfit(x,y,1)
print ("Slope(m),", m)
print ("y-intercept (c),", c)
yp=polyval([m,c],x)
x2 = 12
y2 = m*x2 + c
print ("Predicted value of y in month 12,", y2)
plot(x2, y2, 'ro')
plot(x,yp)
gri... |
from sklearn import tree
from sklearn.tree import export_graphviz
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import graphviz
import os
os.chdir("data")
seed = 1234
power_investment = pd.read_csv('powergen.csv')
y= power_investment[['Profitable']]
X = pd.get_dummies(power_inv... |
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.metrics import accuracy_score
import os
os.chdir("data")
seed = 1234
forestation= pd.read_csv('forestation_1.csv')
forestation
y= fores... |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.externals._pilutil import imread
import os
os.chdir('data/images_part2')
trainimage = []
for i in range(22):
A = imread(str(i) + '.tif', flatten = True)
B, c, D = np.linalg.svd(A)
trainimage.append({'original': A, 'singular': c[:21]})
testi... |
#!/usr/bin/python
# Minimal TensorFlow smoke test: build a constant op and evaluate it in a
# session, printing the resulting bytes.
# NOTE(review): this uses the TensorFlow 1.x API — tf.Session was removed in
# TensorFlow 2.x, so running this requires TF 1.x (or tf.compat.v1); confirm
# the intended TF version for this script.
import tensorflow as tf
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
import csv
import sys
from math import sin, cos, sqrt, atan2, radians
import datetime
import time
from azure.storage.blob import AppendBlobService
# Configure account name with the Azure Storage Account Name and the account Key from Storage Explorer
append_blob_service = AppendBlobService(
account_name='storage_acco... |
# Usage: Call python3 controller.py X, where X is the number of SLURM
# jobs you SLURM to spawn on the SLURM nodes
import csv
import sys
import subprocess
import datetime
import time
from azure.storage.blob import AppendBlobService
# Configure account name with the Azure Storage Account Name and the account Key fro... |
import os, socket, sys, json
from base64 import b64encode, b64decode
from hashlib import sha256
from time import time
from urllib.parse import quote_plus, urlencode
from hmac import HMAC
import paho.mqtt.client as mqtt
conn_str = os.getenv("conn_str")
osname = ""
rid = 0
if sys.platform == "linux":
osname = str(os... |
import os, socket, sys, json
from azure.iot.device import IoTHubDeviceClient, Message, MethodResponse
conn_str = os.getenv("conn_str")
osname = ""
if sys.platform == "linux":
osname = str(os.uname().release + " " + os.uname().version + " " + os.uname().machine)
else:
osname = str("Windows build " + str(sys.get... |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.