| repo_name (string) | combined_content (string) | file_paths (list) |
|---|---|---|
0-Ajay-Bhargav-0/FashHUB
|
from django.shortcuts import render,redirect,reverse,HttpResponse
from django.contrib.auth.models import User,auth
from django.contrib import messages
from .forms import ProfileForm,UserForm
from .models import Profile
def register(request):
if request.method == 'POST':
username = request.POST['username']
email = request.POST['email']
phone_number = request.POST['phone_number']
birth_date = request.POST['birth_date']
password1 = request.POST['password1']
password2 = request.POST['password2']
if password1 != password2:
messages.error(request,'Passwords do not match')
return render(request,'register.html')
user = User.objects.create_user(username=username,email=email,password=password1)
user.save()
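# NOTE: this assumes a Profile row is created automatically for each new User
# (e.g. via a post_save signal); otherwise the lookup below raises Profile.DoesNotExist.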
profile = Profile.objects.get(user=user)
profile.phone_number=phone_number
profile.birth_date=birth_date
profile.save()
print("user created")
return redirect('/accounts/login')
return render(request,'register.html')
def login(request):
if request.method=='POST':
username=request.POST['username']
password=request.POST['password']
user=auth.authenticate(username=username,password=password)
if user is not None:
auth.login(request,user)
print('login successful')
return redirect('/')
else:
print("wrong credentials")
return render(request,'login.html')
def logout(request):
auth.logout(request)
print("logged out")
return redirect('/')
--- FILE SEPARATOR ---
from django.contrib import admin
from store.models import Product,Cart,Wishlist,Contact,events,Journal,Donations
# Register your models here.
admin.site.register(Product)
admin.site.register(Cart)
admin.site.register(Wishlist)
admin.site.register(Contact)
admin.site.register(events)
admin.site.register(Journal)
admin.site.register(Donations)
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 16:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, null=True)),
('details', models.TextField(max_length=300, null=True)),
],
),
migrations.CreateModel(
name='Donations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=300, null=True)),
('email', models.CharField(max_length=300, null=True)),
('phone_number', models.CharField(max_length=300, null=True)),
('address', models.CharField(max_length=300, null=True)),
('clothes_number', models.CharField(max_length=300, null=True)),
],
),
migrations.CreateModel(
name='events',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=300, null=True)),
('organizer_name', models.CharField(max_length=300, null=True)),
('details', models.TextField(max_length=300, null=True)),
('phone_number', models.IntegerField(blank=True)),
('email', models.CharField(max_length=300, null=True)),
],
),
migrations.CreateModel(
name='Journal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('img_front', models.ImageField(blank=True, upload_to='')),
('img', models.ImageField(blank=True, upload_to='')),
('category', models.CharField(max_length=300, null=True)),
('title', models.CharField(max_length=300, null=True)),
('author', models.CharField(max_length=300, null=True)),
('details', models.CharField(max_length=300, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mainimage', models.ImageField(blank=True, upload_to='')),
('img1', models.ImageField(blank=True, upload_to='')),
('img2', models.ImageField(blank=True, upload_to='')),
('img3', models.ImageField(blank=True, upload_to='')),
('price', models.FloatField()),
('studio_name', models.CharField(max_length=300, null=True)),
('size', models.CharField(max_length=300, null=True)),
('gender', models.CharField(max_length=300, null=True)),
('category', models.CharField(max_length=300, null=True)),
('rent_price', models.FloatField(null=True)),
('count', models.IntegerField(default=0)),
('rented', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='Wishlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='store.Product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='store.Product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 20:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='journal',
name='details',
field=models.TextField(max_length=1000, null=True),
),
]
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0002_auto_20201101_0205'),
]
operations = [
migrations.AddField(
model_name='journal',
name='content',
field=models.TextField(max_length=1000, null=True),
),
migrations.AddField(
model_name='journal',
name='date',
field=models.DateField(null=True),
),
]
--- FILE SEPARATOR ---
# Generated by Django 2.2 on 2020-10-31 21:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0003_auto_20201101_0222'),
]
operations = [
migrations.RemoveField(
model_name='events',
name='details',
),
migrations.AddField(
model_name='events',
name='bio',
field=models.TextField(max_length=1000, null=True),
),
]
--- FILE SEPARATOR ---
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Product(models.Model):
mainimage = models.ImageField(blank=True)
img1 = models.ImageField(blank=True)
img2 = models.ImageField(blank=True)
img3 = models.ImageField(blank=True)
# category = models.ForeignKey(Category, on_delete=models.CASCADE)
# detail_text = models.TextField(max_length=1000, verbose_name='Detail Text')
price = models.FloatField()
studio_name = models.CharField(max_length=300,null=True)
size = models.CharField(max_length=300,null=True)
gender = models.CharField(max_length=300,null=True)
category = models.CharField(max_length=300,null=True)
rent_price = models.FloatField(null=True)
count = models.IntegerField(default=0)
rented = models.BooleanField(default=False)
def __str__(self):
return self.category
class events(models.Model):
name = models.CharField(max_length=300,null=True)
organizer_name = models.CharField(max_length=300,null=True)
bio = models.TextField(max_length=1000,null=True)
#image = models.IntegerField(blank=True)
#link = models.CharField(max_length=300,null=True)
phone_number = models.IntegerField(blank=True)
email = models.CharField(max_length=300,null=True)
venue = models.CharField(max_length=300,null=True)
date = models.DateField(null=True)
def __str__(self):
return self.name
class Journal(models.Model):
img_front = models.ImageField(blank=True)
img = models.ImageField(blank=True)
category = models.CharField(max_length=300,null=True)
title = models.CharField(max_length=300,null=True)
date = models.DateField(null=True)
author = models.CharField(max_length=300,null=True)
details = models.TextField(max_length=1000,null=True)
content = models.TextField(max_length=1000,null=True)
def __str__(self):
return self.title
class Contact(models.Model):
name = models.CharField(max_length=300,null=True)
details = models.TextField(max_length=300,null=True)
def __str__(self):
return self.name
class Cart(models.Model):
item = models.ForeignKey(Product, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.item.category
class Wishlist(models.Model):
item = models.ForeignKey(Product, on_delete=models.DO_NOTHING)
user = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.item.category
class Donations(models.Model):
Name = models.CharField(max_length=300,null=True)
email = models.CharField(max_length=300,null=True)
phone_number = models.CharField(max_length=300,null=True)
address = models.CharField(max_length=300,null=True)
clothes_number = models.CharField(max_length=300,null=True)
--- FILE SEPARATOR ---
"""WASP URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('',views.index,name='index'),
path('eventform/',views.eventform,name='eventform'),
path('eventpage/',views.eventpage,name='eventpage'),
path('event/<int:id>',views.event,name='event'),
path('journal/',views.journal,name='journals'),
path('journal/<int:id>',views.journal_page,name='journal_page'),
path('products/<int:id>/',views.product,name='product'),
path('cart/',views.showcart,name='cart'),
path('addcart/<int:id>',views.addcart,name='addcart'),
path('buy/<int:id>',views.buy,name='buy'),
path('buycart/',views.buycart,name='buycart'),
path('showWishlist/',views.showWishlist,name='showWishlist'),
path('addWishlist/<int:id>',views.addWishlist,name='addWishlist'),
path('removeWishlist/<int:id>',views.removeWishlist,name='removeWishlist'),
path('donation/',views.donation,name='donation'),
path('products/<str:gender>/<str:category>',views.genderCategory,name='genderCategory'),
path('aboutus/',views.aboutus,name='aboutus'),
# path('<str:gender>/<str:category>',views.,name='menbottom'),
# path('<str:gender>/<str:category>',views.,name='menfootware'),
# path('<str:gender>/<str:category>',views.,name='menaccessories'),
# path('women/<str:category>',views.,name='womenshirt'),
# path('women/bottom',views.,name='womenbottom'),
# path('women/footware',views.,name='womenfootware'),
# path('women/accessories',views.,name='womenaccessories'),
# path('kids/shirt',views.,name='kidsshirt'),
# path('kids/bottom',views.,name='kidsbottom'),
# path('kids/footware',views.,name='kidsfootware'),
# path('kids/accessories',views.,name='kidsaccessories'),
# path('fluids/shirt',views.,name='fluidsshirt'),
# path('fluids/bottom',views.,name='fluidsbottom'),
# path('fluids/footware',views.,name='fluidsfootware'),
# path('fluids/accessories',views.,name='fluidsaccessories'),
]
# shirt
# jeans
# footware
# sheatshirts
# jackets
# fitness
# tshirts
# ethnic
# men, women, kid, fluids
--- FILE SEPARATOR ---
from django.shortcuts import render,redirect
from .models import Contact,Journal,Product,Cart,Wishlist,events,Donations
# Create your views here.
def index(request):
if request.method=='POST':
email = request.POST['email']
message = request.POST['Message']
contact = Contact.objects.create(name=email,details=message)
contact.save()
# product = Product.objects.all()
# context = {
# 'product':product,
# }
return render(request,'index.html')
def eventform(request):
if request.method=='POST':
username = request.POST['Event Name']
email = request.POST['email']
phone_number = request.POST['phone']
organization = request.POST['Organisation']
date = request.POST['date']
venue = request.POST['venue']
bio = request.POST['Bio']
event = events.objects.create(name=username,organizer_name=organization,bio=bio,phone_number=phone_number,email=email,venue=venue,date=date)
event.save()
return redirect('/eventform')
return render(request,'eventreg.html')
def eventpage(request):
event = events.objects.all()
context = {
'events':event,
}
return render(request,'events.html',context=context)
def event(request,id):
event = events.objects.get(id=id)
context = {
'event':event,
}
return render(request,'event.html',context=context)
def journal(request):
journals = Journal.objects.all()
context = {
"journals":journals,
}
return render(request,'journal.html',context=context)
def journal_page(request,id):
journal = Journal.objects.get(id=id)
context = {
'journal':journal,
}
return render(request,'journal-page.html',context=context)
def aboutus(request):
return render(request,'aboutus.html')
# def products(request):
# products = Product.objects.all()
# context = {
# "products":products,
# }
# return render(request,'products.html',context=context)
def product(request,id):
product = Product.objects.get(id=id)
context = {
"product":product,
}
return render(request,'product.html',context=context)
def showcart(request):
cart = Cart.objects.filter(user=request.user)
context = {
'cart':cart,
}
return render(request,'cart.html',context=context)
def addcart(request,id):
product = Product.objects.get(id=id)
Cart.objects.create(item=product,user=request.user)
return redirect('/')
def buy(request,id):
product = Product.objects.get(id=id)
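# NOTE: this read-modify-write on count is not atomic; under concurrent purchases
# an update with an F('count') expression would avoid lost decrements.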
product.count-=1
if product.count<0:
product.count=0
product.save()
return redirect('/')
def buycart(request):
cart = Cart.objects.filter(user=request.user)
for item in cart:
item.item.count-=1
if item.item.count<0:
item.item.count=0
item.item.save()
Cart.objects.filter(user=request.user).delete()
return redirect('/')
def showWishlist(request):
wishlist = Wishlist.objects.filter(user=request.user)
context = {
'wishlist':wishlist,
}
return render(request,'wishlist.html',context=context)
def addWishlist(request,id):
product = Product.objects.get(id=id)
Wishlist.objects.create(item=product,user=request.user)
return redirect('/')
def removeWishlist(request,id):
product = Product.objects.get(id=id)
Wishlist.objects.filter(item=product,user=request.user).delete()
return redirect('/showWishlist/')
#remove cart feature
def genderCategory(request,gender,category):
product = Product.objects.filter(gender=gender,category=category)
context = {
"product":product,
"gender":gender,
"category":category,
}
return render(request,'sproducts.html',context=context)
def donation(request):
if request.method=='POST':
name = request.POST['name']
email = request.POST['email']
phone_number = request.POST['phone']
address = request.POST['address']
clothes_number = request.POST['clothes']
donation = Donations.objects.create(phone_number=phone_number,email=email,Name=name,address=address,clothes_number=clothes_number)
donation.save()
return render(request,'donations.html')
|
[
"/accounts/views.py",
"/store/admin.py",
"/store/migrations/0001_initial.py",
"/store/migrations/0002_auto_20201101_0205.py",
"/store/migrations/0003_auto_20201101_0222.py",
"/store/migrations/0004_auto_20201101_0245.py",
"/store/models.py",
"/store/urls.py",
"/store/views.py"
] |
0-Yzx/FEELVOS
|
from itertools import combinations
from cv2 import cv2
import os
import natsort
import pandas as pd
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import ToPILImage
from torchvision import transforms, utils
from feelvos.transform import preprocessing
class FEELVOSTriple(Dataset):
def __init__(self, root='./data/', split='train', transform=None):
super().__init__()
self.root = root
self.split = split
self.transform = transform
self.folder_list = []
self.items = []
folder_f = open(os.path.join(root, self.split+"_folder_list.txt"), "r")
for x in folder_f:
self.folder_list.append(x[:-1])
for i in range(len(self.folder_list)):
tmp_list = natsort.natsorted(os.listdir(os.path.join(root, 'image', self.folder_list[i])))
for j in range(len(tmp_list) - 2):
first = tmp_list[j]
for k in range(len(tmp_list[j+1:])-1):
comb_1 = tmp_list[k+1]
comb_2 = tmp_list[k+2]
self.items.append((os.path.join(self.root, 'image', self.folder_list[i], first), os.path.join(self.root, 'image', self.folder_list[i], comb_1), os.path.join(self.root, 'image', self.folder_list[i], comb_2)))
def __getitem__(self, index):
src = []
mask = []
seltem = self.items[index]
for i in range(3):
src.append(cv2.imread(seltem[i]))
mask.append(cv2.imread(os.path.join(seltem[i].split('/')[1], 'mask', seltem[i].split('/')[3], seltem[i].split('/')[4])))
sample = (src, mask)
if self.transform is None:
pass
else:
sample = self.transform(*sample)
if self.split == 'train':
sample[0][0] = sample[1][0]
sample[0][1] = sample[1][1]
return sample
def __len__(self):
return len(self.items)
if __name__ == "__main__":
ds_train = FEELVOSTriple(root='./data/', split='train', transform=preprocessing)
ds_test = FEELVOSTriple(root='./data/', split='test', transform=preprocessing)
print("DATA LOADED")
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
from feelvos.models.Embeddings import DepthwiseSeparableConv2D
class DynamicSegmentationHead(nn.Module):
def __init__(self, cin, cout):
super(DynamicSegmentationHead, self).__init__()
self.depthwise_l = DepthwiseSeparableConv2D(cin, 256, 7)
self.depthwise_r = DepthwiseSeparableConv2D(256, 256, 7)
self.conv = nn.Conv2d(256, cout, 1)
def forward(self, x):
x = self.depthwise_l(x)
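# NOTE: the same depthwise_r module (and therefore the same 256->256 weights)
# is applied three times in succession below.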
x = self.depthwise_r(x)
x = self.depthwise_r(x)
x = self.depthwise_r(x)
x = nn.ReLU(inplace=True)(x)
x = self.conv(x)
x = nn.Softmax2d()(x)
return x
--- FILE SEPARATOR ---
import torch
import torch.nn as nn
import torch.nn.functional as F
from modelsummary import summary
class DepthwiseSeparableConv2D(nn.Module):
def __init__(self, c_in, c_out, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
super(DepthwiseSeparableConv2D,self).__init__()
self.conv1 = nn.Conv2d(c_in, c_in, kernel_size, stride, padding, dilation, groups=c_in, bias=bias)
self.pointwise = nn.Conv2d(c_in, c_out, 1, 1, 0, 1, 1, bias=bias)
def forward(self, x):
x = self.conv1(x)
x = self.pointwise(x)
return x
class PixelwiseEmbedding(nn.Module):
def __init__(self, c_in, c_out_1, c_out_2):
super(PixelwiseEmbedding, self).__init__()
self.separable = DepthwiseSeparableConv2D(c_in=c_in, c_out=c_out_1, kernel_size=3, stride=1, padding=1)
self.conv1 = nn.Conv2d(c_out_1, c_out_2, kernel_size=1, stride=1, padding=0)
def forward(self, x):
x = self.separable(x)
x = self.conv1(x)
return x
--- FILE SEPARATOR ---
from cv2 import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from modelsummary import summary
from feelvos.models.Backbone import UNet
from feelvos.models.Embeddings import PixelwiseEmbedding
from feelvos.models.DynamicSegmentationHead import DynamicSegmentationHead
from feelvos.models.Matching import global_matching, local_matching
class FEELVOS(nn.Module):
def __init__(self, c_in, n_classes, use_gt=True, pretrained=None):
super(FEELVOS, self).__init__()
self.n_classes = n_classes
self.use_gt = use_gt
self.backbone = None
if pretrained is not None and self.backbone is None:
self.backbone = UNet(c_in, n_classes)
self.backbone.load_state_dict(torch.load(pretrained))
self.backbone.eval()
self.embedding = PixelwiseEmbedding(n_classes, n_classes, 100)
self.dsh = DynamicSegmentationHead(n_classes+1+1+1, 1)
def forward(self, x_list):
x1 = x_list[0]
x2 = x_list[1]
x3 = x_list[2]
if self.use_gt == False:
with torch.no_grad():
x1 = self.backbone(x1)
x2 = self.backbone(x2)
with torch.no_grad():
x3 = self.backbone(x3)
x1_l = []; x1_e = []
x2_l = []; x2_e = []
x3_l = []; x3_e = []
gm = []; lm = []
logits = []
x1 = F.interpolate(x1, 32)
x2 = F.interpolate(x2, 32)
x3 = F.interpolate(x3, 32)
for i in range(self.n_classes):
x1_l.append(x1[:, i, :, :].unsqueeze(1))
x1_e.append(self.embedding(x1_l[i]))
x2_l.append(x2[:, i, :, :].unsqueeze(1))
x2_e.append(self.embedding(x2_l[i]))
x3_l.append(x3[:, i, :, :].unsqueeze(1))
x3_e.append(self.embedding(x3_l[i]))
with torch.no_grad():
gm.append(global_matching(x1_e[i], x3_e[i]))
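# NOTE: the "local" term is also computed with global_matching here, since
# local_matching in Matching.py is still a stub that returns zeros.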
lm.append(global_matching(x2_e[i], x3_e[i]))
x_t = torch.cat((x3, gm[i].cuda(), lm[i].cuda(), x2_l[i]), dim=1)
logits.append(self.dsh(x_t))
x = None
for i in range(self.n_classes):
if i == 0:
x = logits[i]
else:
x = torch.cat((x, logits[i]), dim=1)
return x
if __name__ == "__main__":
device = torch.device("cuda:0")
model = FEELVOS(3, 1, use_gt=False).cuda(device=device)
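# NOTE: with use_gt=False and no pretrained checkpoint, self.backbone stays None,
# so actually running this demo assumes a UNet checkpoint path is passed via `pretrained`.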
# summary(model, torch.zeros((1, 3, 512, 512)).cuda(), show_input=True)
# summary(model, torch.zeros((1, 3, 512, 512)).cuda(), show_input=False)
x1 = cv2.imread('example/x2.png')
x2 = cv2.imread('example/x3.png')
x1 = cv2.resize(x1, dsize=(256, 256))
x1 = torchvision.transforms.ToTensor()(x1)
x1 = x1.unsqueeze(0).to(device=device)
x2 = cv2.resize(x2, dsize=(256, 256))
x2 = torchvision.transforms.ToTensor()(x2)
x2 = x2.unsqueeze(0).to(device=device)
x = torch.cat((x1, x2), dim=0)
y = model([x, x, x])
print(y)
--- FILE SEPARATOR ---
from cv2 import cv2
import torch
import torch.nn as nn
import torchvision
from torch.autograd.variable import Variable
from .correlation_package.correlation import Correlation
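# NOTE: distance() below compares the two embeddings' squared norms (|sum(p*p) - sum(q*q)|)
# rather than the squared difference ||p - q||^2 used in the FEELVOS paper, and
# global_matching() only compares pixels at identical coordinates instead of taking the
# minimum distance over all reference pixels; both act as simplified placeholders.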
def distance(p, q):
ps = torch.sum(p * p)
qs = torch.sum(q * q)
norm = torch.norm(ps-qs, p=2, dim=-1)
res = 1 - (2 / (1 + torch.exp(norm)))
return res
def global_matching(x, y):
output = torch.zeros(x.size(0), 1, x.size(2), x.size(3))
for i in range(x.size(0)):
for j in range(x.size(2)):
for k in range(x.size(3)):
output[i, :, j, k] = distance(x[i, :, j, k], y[i, :, j, k])
return output
def local_matching(x, y, window):
output = torch.zeros(x.size(0), 1, x.size(2), x.size(3))
# out_corr = Correlation(pad_size=6, kernel_size=window, max_displacement=0, stride1=1, stride2=1, corr_multiply=1)(x, y)
return output
--- FILE SEPARATOR ---
import random
import torch
import torch.nn as nn
import torchvision
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from feelvos.models.Backbone import UNet
from feelvos.dataset import FEELVOSTriple
from feelvos.transform import preprocessing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
if __name__ == "__main__":
target_folder = './data/'
ds_test = FEELVOSTriple(root='./data/', split='test', transform=preprocessing)
loc = './unet/weight010'
model = UNet(3, 1)
model.load_state_dict(torch.load(loc+'.pt'))
model = model.to(device)
model.eval()
pick = []
for i in range(1):
pick.append(random.randrange(0, 500, 1))
for i in pick:
X, y = ds_test.__getitem__(i)
torchvision.utils.save_image(X[0], './testimage/'+str(i)+'_X'+'.png')
torchvision.utils.save_image(y[0], './testimage/'+str(i)+'_y'+'.png')
X = X[0].view(1, 3, 256, 256).cuda()
y_pred = model(X)
torchvision.utils.save_image(y_pred, './testimage/'+loc.split('/')[-1]+'_'+str(i)+'_ypred'+'.png')
--- FILE SEPARATOR ---
import argparse
from feelvos.dataset import FEELVOSTriple
from feelvos.transform import preprocessing
from feelvos.models.FEELVOS import FEELVOS
from feelvos.loss import dice_loss
from feelvos.metric import dice_coeff
from feelvos.trainer import Trainer
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch_size', type=int, default=7
)
parser.add_argument(
'--epoch', type=int, default=40
)
parser.add_argument(
'--lr', type=float, default=0.001
)
parser.add_argument(
'--dataset', type=str, default='./data/'
)
parser.add_argument(
'--workers', type=int, default=4
)
cfg = parser.parse_args()
print(cfg)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
if __name__ == "__main__":
ds_train = FEELVOSTriple(root='./data/', split='train', transform=preprocessing)
ds_test = FEELVOSTriple(root='./data/', split='test', transform=preprocessing)
dl_train = DataLoader(ds_train, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.workers)
dl_test = DataLoader(ds_test, batch_size=cfg.batch_size, shuffle=False, num_workers=cfg.workers)
print("DATA LOADED")
model = FEELVOS(3, 1, use_gt=True, pretrained='./unet/weight010.pt')
optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
criterion = nn.BCELoss()
success_metric = nn.BCELoss()
summary = SummaryWriter()
trainer = Trainer(model, criterion, optimizer, success_metric, device, None, False)
fit = trainer.fit(dl_train, dl_test, num_epochs=cfg.epoch, checkpoints='./save2/'+model.__class__.__name__+'.pt')
torch.save(model.state_dict(), './save/final_state_dict.pt')
torch.save(model, './save/final.pt')
loss_fn_name = "cross entropy"
best_score = str(fit.best_score)
print(f"Best loss score(loss function = {loss_fn_name}): {best_score}")
--- FILE SEPARATOR ---
from cv2 import cv2
import torchvision.transforms as transforms
def preprocessing(images, masks):
fin_images = []
fin_masks = []
image_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
for i in range(len(images)):
tmp_i = cv2.resize(images[i], dsize=(256, 256), interpolation=cv2.INTER_AREA)
tmp_m = cv2.resize(masks[i], dsize=(256, 256), interpolation=cv2.INTER_AREA)
tmp_m = cv2.cvtColor(tmp_m, cv2.COLOR_BGR2GRAY)
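# The pixel loop below maps label value 29 to 255 (foreground); the equivalent
# vectorized form would be tmp_m[tmp_m == 29] = 255.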
for x in range(tmp_m.shape[0]):
for y in range(tmp_m.shape[1]):
if tmp_m[y, x] == 29:
tmp_m[y, x] = 255
fin_images.append(image_transform(tmp_i).float())
fin_masks.append(image_transform(tmp_m).float())
return fin_images, fin_masks
--- FILE SEPARATOR ---
import torch
def list_to_tensor(t_list, x, y, device):
for i in range(x):
for j in range(y):
t_list[i][j] = torch.from_numpy(t_list[i][j]).to(device=device)
return t_list
--- FILE SEPARATOR ---
from setuptools import setup, find_packages
setup(
name = 'feelvos',
version = '0.5',
description = 'FEELVOS implementation in PyTorch; FEELVOS: Fast End-to-End Embedding Learning for Video Object Segmentation',
author = 'Younghan Kim',
author_email = '[email protected]',
install_requires= [],
packages = find_packages(),
python_requires = '>=3.6'
)
|
[
"/feelvos/dataset.py",
"/feelvos/models/DynamicSegmentationHead.py",
"/feelvos/models/Embeddings.py",
"/feelvos/models/FEELVOS.py",
"/feelvos/models/Matching.py",
"/feelvos/test.py",
"/feelvos/train.py",
"/feelvos/transform.py",
"/feelvos/util/toTensor.py",
"/setup.py"
] |
0-gpa-gang/NumRoll
|
import sqlite3
def create():
conn = sqlite3.connect('image.db')
c = conn.cursor()
c.execute("""DROP TABLE image""")
c.execute("""CREATE TABLE image (
path TEXT PRIMARY KEY,
classifier INTEGER DEFAULT "N/A"
)""")
c.execute("""INSERT INTO image (path)
VALUES
('image/0.jpeg'),
('image/1.jpeg'),
('image/2.jpeg'),
('image/3.jpeg'),
('image/4.jpeg');""")
conn.commit()
if __name__ == "__main__":
create()
--- FILE SEPARATOR ---
class Image:
def __init__(self, path, classifier):
self.path = path
self.classifier = classifier
--- FILE SEPARATOR ---
import sys
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QPushButton,QAction, QShortcut
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtCore import Qt,pyqtSlot
class Canvas(QtWidgets.QMainWindow):
def __init__(self, index):
super().__init__()
self.label = QtWidgets.QLabel()
self.whiteboard = QtGui.QPixmap(280,280)
self.whiteboard.fill(Qt.black)  # a freshly constructed QPixmap is uninitialized, so start from a black canvas
#self.setStyleSheet("background-color: black;")
self.label.setPixmap(self.whiteboard)
self.setCentralWidget(self.label)
self.index = index
#self.count = 0
self.last_x, self.last_y = None, None
def mouseMoveEvent(self, e):
if self.last_x is None:
self.last_x = e.x()
self.last_y = e.y()
return
cursor = QtGui.QPainter(self.label.pixmap())
p = QtGui.QPen()
p.setWidth(12)
p.setColor(QtGui.QColor('#FFFFFF'))
cursor.setPen(p)
cursor.drawLine(self.last_x, self.last_y, e.x(), e.y())
cursor.end()
self.update()
# update the origin for the next event
self.last_x = e.x()
self.last_y = e.y()
def mouseReleaseEvent(self, e):
self.last_x = None
self.last_y = None
def save(self):
p = QWidget.grab(self)
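# Grab the 280x280 canvas and downscale it to the 28x28 input size expected by
# the MNIST-trained classifier.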
p_resized = p.scaled(28,28,QtCore.Qt.KeepAspectRatio, transformMode=QtCore.Qt.SmoothTransformation)
fileName = "image/"+ str(self.index) +".jpeg"
p_resized.save(fileName, 'JPEG')
print("image saved!")
self.close()
def save_all(lst_wind):
for i in lst_wind:
i.save()
def canvases():
app = QtWidgets.QApplication(sys.argv)
windows = []
shortcuts = []
for i in range(5):
windows.append(Canvas(i))
windows[i].setWindowFlags(QtCore.Qt.FramelessWindowHint)
windows[i].move(340+i*300,400)
shortcuts.append(QShortcut(QKeySequence('Ctrl+S'), windows[i]))
shortcuts[i].activated.connect(lambda: save_all(windows))
for i in range(5):
windows[i].show()
app.exec_()
if __name__ == "__main__":
canvases()
--- FILE SEPARATOR ---
import numpy as np
import tensorflow as tf
from PIL import Image
from io_file import *
from tensorflow import keras
from tensorflow.keras.models import load_model
from Database import *
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
class Classify:
def __init__(self):
self.model = load_model("NumRoll.h5")
def classify(self, np_arr):
prediction = self.model.predict(np.array([np_arr]).reshape(1, 28, 28, 1))
return np.argmax(prediction)
def classify_all(self, lst):
num_list = []
for i in lst:
num_list.append(int(self.classify(i)))
return num_list
class DataSet:
def __init__(self):
self.position = read_from_db() # a list of string locations
self.num_array = [] #a list of numpy arrays
def get_num_array(self):
return self.num_array
def image_to_array(self):
total_arrays = []
for i in self.position:
image = Image.open(i)
data = np.array(image).astype('float32')/255.0
data = np.sum(data, axis=-1)/data.shape[-1]
total_arrays.append(data)
self.num_array = total_arrays
def classify_and_save():
create()
data = DataSet()
data.image_to_array()
print(data.num_array)
classifier = Classify()
final = classifier.classify_all(data.num_array)
print(final)
output_to_db(final)
if __name__ == "__main__":
classify_and_save()
--- FILE SEPARATOR ---
import sys
import os
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5 import uic
app = QApplication(sys.argv)
failWindow = QWidget()
failWindow.setWindowTitle("Error!")
failWindow.setGeometry(150,150,800,300)
failWindow.move(560,560)
failmsg = QLabel('<h2>WRONG CODE! DENIED ACCESS</h2>', parent = failWindow)
failmsg.move(60,60)
failWindow.show()
sys.exit(app.exec_())
--- FILE SEPARATOR ---
import sqlite3
import os
# import the following lines to the main py file
# conn = sqlite3.connect("image.db")
# c = conn.cursor()
def read_from_db():
conn = sqlite3.connect("image.db")
c = conn.cursor()
c.execute("SELECT * FROM image")
total = []
for row in c.fetchall():
total.append(row[0])
return total
def output_to_db(classify):
conn = sqlite3.connect("image.db")
c = conn.cursor()
total = read_from_db()
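# NOTE: pairing classify[i] with total[i] assumes read_from_db() returns rows in the
# same order each time; SELECT without ORDER BY does not guarantee this, so an
# explicit ORDER BY path would make the pairing deterministic.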
for i in range(len(classify)):
num = classify[i]
location = total[i]
c.execute("UPDATE image SET classifier = (?) WHERE path = (?)", (num, location))
conn.commit()
# if want to see the classified result in a printed list, turn docstring into code
"""
classified = []
c.execute("SELECT * FROM image")
for row in c.fetchall():
classified.append(row[1])
print(classified)
"""
def special_case():
conn = sqlite3.connect("image.db")
c = conn.cursor()
c.execute("SELECT * FROM image")
special = ""
for row in c.fetchall():
special += str(row[1])
if special == "42069":
os.system("vlc RickRoll.mp4") # change with system
--- FILE SEPARATOR ---
import sys
import os
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, Qt
from PyQt5 import uic
import numpy as np
from classifier import *
from canvas import *
import sqlite3
def window():
# create instance of QApplication
# sys.argv contains command link arguments
app = QApplication(sys.argv)
#create the GUI
widget = QWidget()
widget.setWindowTitle("NumRoll")
# (x,y,width, height)
widget.setGeometry(150,150,1500,700)
widget.move(1170, 330)
welcomemsg = QLabel('<h1>Your Homework is Locked!</h1>', parent=widget)
welcomemsg.move(350,60)
instruction = QLabel('<h3>Toggle your mouse to write down your 5-bit passcode</h3>', parent = widget)
instruction.move(250,120)
instruction2 = QLabel('<h3>When you are done, Press "Ctrl+S" to proceed.</h3>', parent = widget)
instruction2.move(340,600)
# make the buttons
start = QPushButton(widget)
start.setStyleSheet("background-color:red")
start.setText("Click here to start.")
start.move(600,180)
start.clicked.connect(start_pushed)
# show the window
widget.show()
# execute the program
sys.exit(app.exec_())
def start_pushed():
os.system("python3 canvas.py")
classify_and_save()
compare('12345')
def compare(passcode):
conn = sqlite3.connect("image.db")
c = conn.cursor()
c.execute("""SELECT classifier FROM image""")
#print(str(c.fetchall()))
code = []
for i in c.fetchall():
code.append(str(i[0]))
a = "".join(code)
print("You have entered: "+a)
if a == passcode:
os.system("vim homework.txt")
sys.exit()
elif a == "42069":
os.system("vlc env/RickRoll.mp4")
else:
print("Wrong code")
os.system("python3 error.py")
if __name__ == "__main__":
window()
--- FILE SEPARATOR ---
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten, BatchNormalization
from tensorflow.keras.regularizers import l1, l2
from tensorflow.keras.datasets import mnist
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.utils import to_categorical
from PIL import Image
from tensorflow.keras.mixed_precision import experimental as mixed_precision
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
class MLModel:
def __init__(self):
self.inputs = keras.Input(shape=(28, 28, 1))
self.x = self.conv_module(self.inputs, f=32, ks=(5, 5), s=(1, 1), p="same", a="relu", kr=l2(0.001), br=l2(0.001), do=0.4, mp=True)
self.x = BatchNormalization(-1)(self.x)
#self.x = self.conv_module(self.inputs, f=16, ks=(3, 3), s=(1, 1), p="same", a="relu", kr=l2(0.001), br=l2(0.001), do=0.4, mp=True)
#self.x = BatchNormalization(-1)(self.x)
#self.x = self.conv_module(self.inputs, f=32, ks=(3, 3), s=(1, 1), p="same", a="relu", kr=l2(0.001), br=l2(0.001), do=0.4, mp=True)
#self.x = BatchNormalization(-1)(self.x)
self.x = self.flatten_module(self.x)
self.x = BatchNormalization(-1)(self.x)
self.x = self.dense_module(self.x, u=50, a="relu", kr=l2(0.001), br=l2(0.001))
self.x = BatchNormalization(-1)(self.x)
self.x = self.dense_module(self.x, u=10, a="softmax", kr=l2(0.001), br=l2(0.001))
self.outputs = self.x
def conv_module(self, x, f, ks, s, p, a, kr, br, do=None, mp=False):
x = Conv2D(filters=f, kernel_size=ks, strides=s, padding=p, activation=a, kernel_regularizer=kr, bias_regularizer=br)(x)
if mp:
x = MaxPooling2D(pool_size=(2, 2))(x)
if do != None:
x = Dropout(do)(x)
return x
def flatten_module(self, x):
x = Flatten()(x)
x = Dense(100, activation="relu", kernel_regularizer=l2(0.001), bias_regularizer=l2(0.001))(x)
x = Dropout(0.5)(x)
return x
def dense_module(self, x, u, a, kr, br, do=None):
x = Dense(units=u, activation=a, kernel_regularizer=kr, bias_regularizer=br)(x)
return x
def define_model(self):
self.model = keras.Model(inputs=self.inputs, outputs=self.outputs, name="mnist_model")
def compile_model(self, optimizer, loss, metrics):
self.model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
def train():
mlmodel = MLModel()
mlmodel.define_model()
mlmodel.compile_model(optimizer=SGD(lr=0.0007, momentum=0.9), loss="categorical_crossentropy", metrics=['accuracy'])
(trainX, trainY), (testX, testY) = mnist.load_data()
trainX = trainX.reshape((trainX.shape[0], 28, 28, 1)).astype("float32")
testX = testX.reshape((testX.shape[0], 28, 28, 1)).astype("float32")
trainX /= 255
testX /= 255
trainY = to_categorical(trainY)
testY = to_categorical(testY)
mlmodel.model.fit(x=trainX, y=trainY, batch_size=None, epochs=60, verbose=1, validation_data=(testX, testY), use_multiprocessing=True)
mlmodel.model.save("NumRoll.h5")
if __name__ == "__main__":
train()
|
[
"/Database.py",
"/Images.py",
"/canvas.py",
"/classifier.py",
"/error.py",
"/io_file.py",
"/main.py",
"/training.py"
] |
0-jam/azfunc
|
import logging
import azure.functions as func
from .monkey_generator import generate_text
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python monkey text generator.')
gen_size = req.params.get('gen_size')
if not gen_size:
try:
req_body = req.get_json()
except ValueError:
pass
else:
gen_size = req_body.get('gen_size')
if gen_size:
return func.HttpResponse(generate_text(int(gen_size)))
else:
return func.HttpResponse(
"Please pass a gen_size on the query string or in the request body",
status_code=400
)
--- FILE SEPARATOR ---
import random
# Character codes to sample from: ASCII 32-127 plus backspace (8), tab (9) and newline (10)
CHARS = list(range(32, 128)) + [8, 9, 10]
def shuffle(orig_list):
return random.sample(orig_list, k=len(orig_list))
def generate_text(gen_size=100):
generated_text = ''
for _ in range(gen_size):
generated_text += chr(shuffle(CHARS)[0])
return generated_text
--- FILE SEPARATOR ---
import logging
import azure.functions as func
from .sql_controller import get_places
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
return func.HttpResponse(format(get_places()), mimetype='application/json')
--- FILE SEPARATOR ---
import json
import os
import pyodbc
ENV = os.environ
DB_ENDPOINT = ENV.get('SQL_DB_ENDPOINT')
DB_NAME = ENV.get('SQL_DB_NAME')
DB_USERNAME = ENV.get('SQL_DB_USERNAME')
DB_PASSWORD = ENV.get('SQL_DB_PASSWORD')
SQL_DRIVER = '{ODBC Driver 17 for SQL Server}'
def establish_connection() -> pyodbc.Connection:
return pyodbc.connect('DRIVER=' + SQL_DRIVER + ';SERVER=' + DB_ENDPOINT + ';PORT=1433;DATABASE=' + DB_NAME + ';UID=' + DB_USERNAME + ';PWD=' + DB_PASSWORD)
def exec_sql(query: str) -> list:
with establish_connection() as connection:
with connection.cursor() as cursor:
cursor.execute(query)
column_names = [desc[0] for desc in cursor.description]
try:
rows = cursor.fetchall()
return [dict(zip(column_names, row)) for row in rows]
except pyodbc.ProgrammingError:
return [{'message': 'affected {} rows'.format(cursor.rowcount)}]
finally:
connection.commit()
def get_places():
rows = exec_sql('select * from dbo.places')
# serialize the decimal-typed latitude and longitude columns as float
return json.dumps(rows, ensure_ascii=False, default=float)
--- FILE SEPARATOR ---
import os
import pyodbc
import json
ENV = os.environ
DB_ENDPOINT = ENV.get('SQL_DB_ENDPOINT')
DB_NAME = ENV.get('SQL_DB_NAME')
DB_USERNAME = ENV.get('SQL_DB_USERNAME')
DB_PASSWORD = ENV.get('SQL_DB_PASSWORD')
SQL_DRIVER = '{ODBC Driver 17 for SQL Server}'
def establish_connection():
return pyodbc.connect('DRIVER=' + SQL_DRIVER + ';SERVER=' + DB_ENDPOINT + ';PORT=1433;DATABASE=' + DB_NAME + ';UID=' + DB_USERNAME + ';PWD=' + DB_PASSWORD)
def rows2json(rows):
return json.dumps([tuple(row) for row in rows], ensure_ascii=False)
def exec_sql():
connection = establish_connection()
cursor = connection.cursor()
cursor.execute("SELECT TOP 20 pc.Name as CategoryName, p.name as ProductName FROM [SalesLT].[ProductCategory] pc JOIN [SalesLT].[Product] p ON pc.productcategoryid = p.productcategoryid")
try:
rows = cursor.fetchall()
result_json = rows2json(rows)
except pyodbc.ProgrammingError:
rows = cursor.rowcount
result_json = json.dumps("affected {} rows".format(cursor.rowcount))
cursor.close()
connection.close()
return result_json
|
[
"/azmonkeygen/__init__.py",
"/azmonkeygen/monkey_generator.py",
"/get-places/__init__.py",
"/get-places/sql_controller.py",
"/sqlcontroller/sql_controller.py"
] |
0-jam/utanet_scraper
|
import argparse
import json
from pathlib import Path
def main():
parser = argparse.ArgumentParser(description='utanet_scraper.pyで抽出した曲情報から特定の項目を抽出')
parser.add_argument('input', type=str, help='入力ディレクトリ名')
parser.add_argument('output', type=str, help='出力ファイル名')
parser.add_argument('-a', '--attribute', type=str, default='lyric', choices=['title', 'artist', 'lyricist', 'composer', 'lyric'], help="抽出したい項目(デフォルト:'lyric')")
parser.add_argument('--allow_dups', action='store_true', help='項目の重複を許容(デフォルト:false)')
args = parser.parse_args()
extracted_values = []
for json_path in Path(args.input).iterdir():
with json_path.open() as json_file:
json_dict = json.load(json_file)
extracted_values.extend([value[args.attribute] for value in json_dict.values()])
if not args.allow_dups:
extracted_values = set(extracted_values)
with Path(args.output).open('w', encoding='utf-8') as out:
out.write('\n'.join(extracted_values))
if __name__ == "__main__":
main()
--- FILE SEPARATOR ---
import time
import urllib
from beautifulscraper import BeautifulScraper
from tqdm import tqdm
scraper = BeautifulScraper()
domain = 'https://www.uta-net.com'
attributes = {
# artist name
'artist': '1',
# song title
'title': '2',
# lyricist name
'lyricist': '3',
# composer name
'composer': '8',
}
match_modes = {
# exact match
'exact': '4',
# partial match
'partial': '3',
}
def get_page(url):
time.sleep(1.0)
body = scraper.go(url)
return body
def search_song_ids(query, attribute='lyricist', match_mode='exact'):
# Encode the query, since Japanese keywords are not handled correctly when passed raw
search_url = domain + '/search/?Aselect=' + attributes[attribute] + '&Keyword=' + urllib.parse.quote(query) + '&Bselect=' + match_modes[match_mode] + '&sort='
print('曲リストを取得しています:', search_url)
bodies = [get_page(search_url)]
pages = bodies[0].select('#page_list')[0].find_all('a')
if len(pages) > 0:
page_urls = [urllib.parse.urlparse(page.get('href')) for page in pages]
queries = [urllib.parse.parse_qs(page.query) for page in page_urls]
last_page = page_urls[-1]
last_page_num = max([int(query['pnum'][0]) for query in queries])
lpq = queries[-1]
print(last_page_num, 'ページ見つかりました')
for pnum in tqdm(range(2, last_page_num + 1)):
# Build a new URL, changing only the page number
lpq['pnum'] = [str(pnum)]
page = urllib.parse.ParseResult(
last_page.scheme,
last_page.netloc,
last_page.path,
last_page.params,
urllib.parse.urlencode(lpq, True),
''
)
page_url = urllib.parse.urlunparse(page)
bodies.append(get_page(page_url))
else:
print('1ページ見つかりました')
song_ids = []
for body in bodies:
# Extract the URLs of the lyric pages
for td in body.select('.td1'):
song_ids.append(td.find_all('a')[0].get('href'))
return song_ids
def extract_song(song_id):
song_url = domain + song_id
print('曲データを抽出しています:', song_url)
body = get_page(song_url)
title = body.select('.song-infoboard h2')[0].text
# Extract the lyrics, replacing line breaks with a half-width slash /
lyric = body.find(id='kashi_area').get_text('/')
artist = body.select('[itemprop="recordedAs"]')[0].text.strip()
lyricist = body.select('[itemprop="lyricist"]')[0].text
composer = body.select('[itemprop="composer"]')[0].text
return {
song_id: {
'title': title,
'lyric': lyric,
'artist': artist,
'lyricist': lyricist,
'composer': composer,
}
}
--- FILE SEPARATOR ---
import argparse
import json
import sqlite3
from pathlib import Path
def main():
parser = argparse.ArgumentParser(description='utanet_scraper.py で抽出した JSON ファイルを SQLite DB に変換')
parser.add_argument('json_dir', type=str, help='JSON ファイルのあるディレクトリ')
parser.add_argument('sqlite_file', type=str, help='SQLite ファイル')
args = parser.parse_args()
sqlite_file = Path(args.sqlite_file)
sqlite_connection = sqlite3.connect(sqlite_file)
sqlite_cursor = sqlite_connection.cursor()
sqlite_cursor.execute('''
create table if not exists utanet_songs(
song_id int primary key,
title text,
lyric text,
artist text,
lyricist text,
composer text
)
''')
query_string = '''
insert into utanet_songs(song_id, title, lyric, artist, lyricist, composer)
values (?, ?, ?, ?, ?, ?)
'''
for json_path in Path(args.json_dir).iterdir():
with json_path.open() as json_file:
song_dict = json.load(json_file)
print('処理中:', json_path.name)
song_id = int(json_path.stem)
song_data = tuple(song_dict.values())[0]
query_values = (
song_id,
song_data['title'],
song_data['lyric'],
song_data['artist'],
song_data['lyricist'],
song_data['composer'],
)
sqlite_cursor.execute(query_string, query_values)
sqlite_connection.commit()
sqlite_connection.close()
print('完了')
if __name__ == "__main__":
main()
--- FILE SEPARATOR ---
import argparse
import json
import urllib
from pathlib import Path
from modules.utanet import extract_song
def main():
parser = argparse.ArgumentParser(description='曲情報を抽出(Ctrl + C で中止)')
parser.add_argument('-o', '--output_dir', type=str, default='songs', help="出力ディレクトリ名(デフォルト:'./songs')")
parser.add_argument('-s', '--starts_with', type=int, default=1, help="指定した ID から抽出を開始(デフォルト:'1')")
args = parser.parse_args()
output_dir = Path(args.output_dir)
Path.mkdir(output_dir, parents=True, exist_ok=True)
song_count = args.starts_with
while True:
try:
song_json_path = output_dir.joinpath('{}.json'.format(song_count))
if song_json_path.is_file():
print('スキップ:ファイル "{}" は既に存在します'.format(song_json_path))
continue
song_dict = extract_song('/song/{}/'.format(song_count))
with song_json_path.open('w', encoding='utf-8') as song_json:
song_json.write(json.dumps(song_dict, ensure_ascii=False, indent=2))
except urllib.error.HTTPError:
print('ID: {} が見つかりません'.format(song_count))
continue
finally:
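# runs even when the branches above hit `continue`, so song_count always advances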
song_count += 1
if __name__ == '__main__':
main()
|
[
"/json_extractor.py",
"/modules/utanet.py",
"/sqlite_converter.py",
"/utanet_scraper.py"
] |
0-k-1/Practice_turorail
|
from django.urls import path
import books
from books.views import PublisherList
urlpatterns = [
path('publishers/',PublisherList.as_view())
]
--- FILE SEPARATOR ---
from django.shortcuts import render
# Create your views here.
from django.views.generic import ListView
from books.models import Publisher
class PublisherList(ListView):
model = Publisher
|
[
"/books/urls.py",
"/books/views.py"
] |
0-k-1/TodoMVC2
|
from django.db import models
#from django.contrib.auth.models import User
class Todo(models.Model):
title = models.CharField(max_length=50)
completed = models.BooleanField(default=False)
--- FILE SEPARATOR ---
# from django.urls import path
from django.conf.urls import url
from App.views import todoMVC_view,save_view
urlpatterns = [
url(r'^$', todoMVC_view),
url(r'^save/', save_view, name='save')
]
--- FILE SEPARATOR ---
from django.shortcuts import render,redirect
from App.models import Todo
import json
# from django.forms.models import model_to_dict
def todoMVC_view(request):
# list=[{"content":"任务1","completed":"True"},{"content":"任务2","completed":"False"}]
# list=[
# {"completed": "false","id": "1","title": "31"},
# {"completed": "true","id": "2","title": "35"},
# {"completed": "true","id": "0","title": "32"}
# ]
# list_value = list.values()
# list = model_to_dict(list[0])
# print(list_value)
ls = Todo.objects.all()
ls = list(ls.values())
print(ls)
return render(request, 'VueExample.html', {"list":json.dumps(ls)})
#return render(request, 'VueExample.html', {"list":list})
def save_view(request):
print(request.POST['q'])
# print(request.body)
# print(type(request.body))
# print(request.body.decode())
# para = json.loads(request.body.decode())
# print(para)
# overwrite everything directly
ls = Todo.objects.all()
ls.delete()
for item in json.loads(request.POST['q']):
Todo.objects.create(title=item['title'], completed=item['completed'])
# deletion did not work
# try:
# for k in item.keys():
# print(k,item[k])
# Todo.objects.update_or_create(id=item['id'],
# defaults={'id': item['id'], 'title': item['title'],
# 'completed': item['completed']})
# except:
# pass
#return render(request, 'VueExample.html')
return redirect('/')
|
[
"/App/models.py",
"/App/urls.py",
"/App/views.py"
] |
0-u-0/webrtc-ios-script
|
#!/usr/bin/env python
import logging
import os
import subprocess
import sys
def IsRealDepotTools(path):
expanded_path = os.path.expanduser(path)
return os.path.isfile(os.path.join(expanded_path, 'gclient.py'))
def add_depot_tools_to_path(source_dir=''):
"""Search for depot_tools and add it to sys.path."""
# First, check if we have a DEPS'd in "depot_tools".
deps_depot_tools = os.path.join(source_dir, 'third_party', 'depot_tools')
if IsRealDepotTools(deps_depot_tools):
# Put the pinned version at the start of the sys.path, in case there
# are other non-pinned versions already on the sys.path.
sys.path.insert(0, deps_depot_tools)
return deps_depot_tools
# Then look if depot_tools is already in PYTHONPATH.
for i in sys.path:
if i.rstrip(os.sep).endswith('depot_tools') and IsRealDepotTools(i):
return i
# Then look if depot_tools is in PATH, common case.
for i in os.environ['PATH'].split(os.pathsep):
if IsRealDepotTools(i):
sys.path.append(i.rstrip(os.sep))
return i
# Rare case, it's not even in PATH, look upward up to root.
root_dir = os.path.dirname(os.path.abspath(__file__))
previous_dir = os.path.abspath(__file__)
while root_dir and root_dir != previous_dir:
i = os.path.join(root_dir, 'depot_tools')
if IsRealDepotTools(i):
sys.path.append(i)
return i
previous_dir = root_dir
root_dir = os.path.dirname(root_dir)
logging.error('Failed to find depot_tools')
return None
def _RunCommand(cmd):
logging.debug('Running: %r', cmd)
subprocess.check_call(cmd)
def _RunGN(args):
logging.info('Gn args : %s', args)
cmd = [sys.executable, os.path.join(add_depot_tools_to_path(), 'gn.py')]
cmd.extend(args)
_RunCommand(cmd)
def _RunNinja(output_directory, args):
logging.info('Ninja args : %s', args)
cmd = [os.path.join(add_depot_tools_to_path(), 'ninja'),
'-C', output_directory]
cmd.extend(args)
_RunCommand(cmd)
def _EncodeForGN(value):
"""Encodes value as a GN literal."""
if isinstance(value, str):
return '"' + value + '"'
elif isinstance(value, bool):
return repr(value).lower()
else:
return repr(value)
def Build(output_directory, gn_args, ninja_target_args):
"""Generates target architecture using GN and builds it using ninja."""
gn_args_str = '--args=' + ' '.join([k + '=' + _EncodeForGN(v) for k, v in gn_args.items()])
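# e.g. {'target_os': 'ios', 'is_debug': False} -> '--args=target_os="ios" is_debug=false'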
gn_args_list = ['gen', output_directory, gn_args_str]
_RunGN(gn_args_list)
_RunNinja(output_directory, ninja_target_args)
--- FILE SEPARATOR ---
#!/usr/bin/env python
import os
import argparse
import logging
import sys
from distutils import dir_util
from build_tools import Build, _RunCommand
# disable x86-64 when you intend to distribute app through the app store
# https://webrtc.github.io/webrtc-org/native-code/ios/
# DEFAULT_ARCHS = ['arm64', 'arm', 'x64', 'x86']
DEFAULT_ARCHS = ['arm64', 'arm', 'x64']
TARGETS = ['sdk:framework_objc']
OUT_DIR = 'out'
SDK_FRAMEWORK_NAME = 'WebRTC.framework'
def parse_args():
parser = argparse.ArgumentParser(description='Collect and build WebRTC iOS framework.')
parser.add_argument('-s', '--source-dir', help='WebRTC source dir. Example: /realpath/to/src')
parser.add_argument('-v', '--verbose', action='store_true', help='Debug logging.')
parser.add_argument('-r', '--is-release', action='store_true', help='Release or not.')
parser.add_argument('--use-bitcode', action='store_true', help='Use bitcode or not.')
parser.add_argument('--enable-vp9', action='store_true', help='Enable VP9 SoftCodec or not.')
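# NOTE: --enable-vp9 is parsed but not currently used when composing the GN args below.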
return parser.parse_args()
def get_debug_dir(is_debug):
if is_debug:
return 'Debug'
else:
return 'Release'
def build_ios_framework(src_dir, is_debug, bitcode):
gn_args = {
'target_os': 'ios',
'ios_enable_code_signing': False,
'use_xcode_clang': True,
'is_debug': is_debug,
'ios_deployment_target': '10.0',
'enable_stripping': True,
'enable_dsyms': not bitcode,
'enable_ios_bitcode': bitcode
}
ninja_target_args = TARGETS
for arch in DEFAULT_ARCHS:
gn_args['target_cpu'] = arch
build_dir = os.path.join(src_dir, OUT_DIR, get_debug_dir(is_debug), arch)
logging.info('Build dir : %s', build_dir)
Build(build_dir, gn_args, ninja_target_args)
def create_fat_library(src_dir, is_debug):
output_dir = os.path.join(src_dir, OUT_DIR, get_debug_dir(is_debug))
lib_paths = [os.path.join(output_dir, arch)
for arch in DEFAULT_ARCHS]
# Combine the slices.
dylib_path = os.path.join(SDK_FRAMEWORK_NAME, 'WebRTC')
# Dylibs will be combined, all other files are the same across archs.
# Use distutils instead of shutil to support merging folders.
dir_util.copy_tree(
os.path.join(lib_paths[0], SDK_FRAMEWORK_NAME),
os.path.join(output_dir, SDK_FRAMEWORK_NAME))
logging.info('Merging framework slices.')
dylib_paths = [os.path.join(path, dylib_path) for path in lib_paths]
out_dylib_path = os.path.join(output_dir, dylib_path)
try:
os.remove(out_dylib_path)
except OSError:
pass
cmd = ['lipo'] + dylib_paths + ['-create', '-output', out_dylib_path]
_RunCommand(cmd)
# Merge the dSYM slices.
lib_dsym_dir_path = os.path.join(lib_paths[0], 'WebRTC.dSYM')
if os.path.isdir(lib_dsym_dir_path):
dir_util.copy_tree(lib_dsym_dir_path, os.path.join(output_dir, 'WebRTC.dSYM'))
logging.info('Merging dSYM slices.')
dsym_path = os.path.join('WebRTC.dSYM', 'Contents', 'Resources', 'DWARF', 'WebRTC')
lib_dsym_paths = [os.path.join(path, dsym_path) for path in lib_paths]
out_dsym_path = os.path.join(output_dir, dsym_path)
try:
os.remove(out_dsym_path)
except OSError:
pass
cmd = ['lipo'] + lib_dsym_paths + ['-create', '-output', out_dsym_path]
_RunCommand(cmd)
logging.info('Done.')
def main():
args = parse_args()
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
if not args.source_dir:
src_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
else:
src_dir = args.source_dir
if os.path.isdir(src_dir):
is_debug = not args.is_release
build_ios_framework(src_dir, is_debug, args.use_bitcode)
create_fat_library(src_dir, is_debug)
else:
logging.error('Src path not exists : %s', src_dir)
if __name__ == '__main__':
sys.exit(main())
|
[
"/build_tools.py",
"/main.py"
] |
0/pathintmatmult
|
#!/usr/bin/env python3
"""
Harmonic oscillator PIFT example.
An oscillator with an angular frequency of x kelvin at reciprocal temperature
beta reciprocal kelvin has a thermal potential energy (in kelvin) of
(1/4) x coth(0.5 beta x)
and a total energy of twice that. For example, for an oscillator with an
angular frequency of 1 K, at 0.1 K the thermal averages are approximately
0.2500 K and 0.5000 K (very nearly the zero point energies), while at 10 K they
are approximately 5.0042 K and 10.008 K. By 100 K, the total energy is about
100.00 K, so we are effectively at the classical limit.
"""
from argparse import ArgumentParser
from pathintmatmult import PIFTMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate HO thermal properties using PIFTMM.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega', metavar='W', type=float, required=True, help='angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='reciprocal temperature (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p.add_argument('--density-out', metavar='FILE', help='path to output density plot')
args = p.parse_args()
mass = args.mass * ME # g/mol
omega = args.omega * KB / HBAR # 1/ps
grid_range = args.grid_range # nm
grid_len = args.grid_len # 1
beta = args.beta / KB # mol/kJ
num_links = args.num_links # 1
density_out = args.density_out
# Calculate values.
harmonic = harmonic_potential(m=mass, w=omega)
ho_pift = PIFTMM([mass], [grid_range], [grid_len], harmonic, beta, num_links)
estimated_potential_energy = ho_pift.expectation_value(harmonic) / KB # K
print('V = {} K'.format(estimated_potential_energy))
# According to the virial theorem, <K> = <V> for a harmonic oscillator.
print('E_virial = {} K'.format(2 * estimated_potential_energy))
# Output plot.
if density_out:
from pathintmatmult.plotting import plot2d
xy_range = (-grid_range, grid_range)
plot2d(ho_pift.density, xy_range, xy_range, density_out, x_label=r'$q_j / \mathrm{nm}$', y_label=r'$q_i / \mathrm{nm}$')
--- FILE SEPARATOR ---
#!/usr/bin/env python3
"""
Harmonic oscillator PIGS example.
An oscillator with an angular frequency of x kelvin has a ground state
potential energy of x/4 kelvin and a total energy of x/2 kelvin. One with a
mass of 1 electron mass and angular frequency of 1 K has a spread of about 120
nm in either direction from the origin; one with a mass of 10 electron masses
spreads about 40 nm. The following are some possible combinations of arguments
to try:
--mass 1 --omega 1 --grid-range 120 --grid-len 100 --beta 12 --num-links 1200
--mass 10 --omega 1 --grid-range 40 --grid-len 100 --beta 12 --num-links 1200
If --trial-deform is not given, a uniform trial function is used. If it is
given, the exact ground state is used as the trial function, but is deformed by
the given factor (1 corresponds to no deformation).
"""
from argparse import ArgumentParser
import numpy as np
from pathintmatmult import PIGSMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate HO ground state properties using PIGSMM.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega', metavar='W', type=float, required=True, help='angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='propagation length (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p_config.add_argument('--trial-deform', metavar='D', type=float, help='deformation factor for exact trial function')
p.add_argument('--wf-out', metavar='FILE', help='path to output wavefunction values')
p.add_argument('--density-out', metavar='FILE', help='path to output density plot')
args = p.parse_args()
mass = args.mass * ME # g/mol
omega = args.omega * KB / HBAR # 1/ps
grid_range = args.grid_range # nm
grid_len = args.grid_len # 1
beta = args.beta / KB # mol/kJ
num_links = args.num_links # 1
trial_deform = args.trial_deform
wf_out = args.wf_out
density_out = args.density_out
# Calculate values.
harmonic = harmonic_potential(m=mass, w=omega)
kwargs = {}
if trial_deform is not None:
alpha = trial_deform * mass * omega / HBAR # 1/nm^2
def trial_f(q: 'nm') -> '1':
return np.exp(-0.5 * alpha * q[..., 0] ** 2)
def trial_f_diff(q: 'nm') -> '1/nm^2':
return alpha * (alpha * q[..., 0] ** 2 - 1) * trial_f(q)
kwargs['trial_f'] = trial_f
kwargs['trial_f_diffs'] = [trial_f_diff]
ho_pigs = PIGSMM([mass], [grid_range], [grid_len], harmonic, beta, num_links, **kwargs)
estimated_potential_energy = ho_pigs.expectation_value(harmonic) / KB # K
estimated_total_energy = ho_pigs.energy_mixed / KB # K
print('V = {} K'.format(estimated_potential_energy))
# According to the virial theorem, <K> = <V> for a harmonic oscillator.
print('E_virial = {} K'.format(2 * estimated_potential_energy))
print('E_mixed = {} K'.format(estimated_total_energy))
# Output wavefunction.
if wf_out:
np.savetxt(wf_out, np.hstack((ho_pigs.grid, ho_pigs.ground_wf[:, np.newaxis])))
# Output plot.
if density_out:
from pathintmatmult.plotting import plot2d
xy_range = (-grid_range, grid_range)
plot2d(ho_pigs.density, xy_range, xy_range, density_out, x_label=r'$q_j / \mathrm{nm}$', y_label=r'$q_i / \mathrm{nm}$')
--- FILE SEPARATOR ---
#!/usr/bin/env python3
"""
Entangled harmonic oscillators PIGS example.
A pair of identical harmonic oscillators with a harmonic interaction potential.
"""
from argparse import ArgumentParser
import numpy as np
from pathintmatmult import PIGSIMM
from pathintmatmult.constants import HBAR, KB, ME
from pathintmatmult.potentials import harmonic_potential
# Parse arguments.
p = ArgumentParser(description='Calculate entangled HO ground state properties using PIGSMM2.')
p_config = p.add_argument_group('configuration')
p_config.add_argument('--mass', metavar='M', type=float, required=True, help='particle mass (electron masses)')
p_config.add_argument('--omega-0', metavar='W', type=float, required=True, help='central potential angular frequency (K)')
p_config.add_argument('--omega-int', metavar='W', type=float, required=True, help='interaction potential angular frequency (K)')
p_config.add_argument('--grid-range', metavar='R', type=float, required=True, help='grid range from origin (nm)')
p_config.add_argument('--grid-len', metavar='L', type=int, required=True, help='number of points on grid')
p_config.add_argument('--beta', metavar='B', type=float, required=True, help='propagation length (1/K)')
p_config.add_argument('--num-links', metavar='P', type=int, required=True, help='number of links')
p_config.add_argument('--trial-deform', metavar='D', type=float, help='deformation factor for exact trial function')
p.add_argument('--wf-out', metavar='FILE', help='path to output wavefunction values')
p.add_argument('--density-diagonal-out', metavar='FILE', help='path to output diagonal density plot')
args = p.parse_args()
mass = args.mass * ME # g/mol
omega_0 = args.omega_0 * KB / HBAR # 1/ps
omega_int = args.omega_int * KB / HBAR # 1/ps
grid_range = args.grid_range # nm
grid_len = args.grid_len # 1
beta = args.beta / KB # mol/kJ
num_links = args.num_links # 1
trial_deform = args.trial_deform
wf_out = args.wf_out
density_diagonal_out = args.density_diagonal_out
# Calculate values.
pot_0 = harmonic_potential(m=mass, w=omega_0)
pot_int = harmonic_potential(m=mass, w=omega_int)
def total_potential(qs: '[nm]') -> 'kJ/mol':
return pot_0(qs[..., [0]]) + pot_0(qs[..., [1]]) + pot_int(qs[..., [0]] - qs[..., [1]])
kwargs = {}
if trial_deform is not None:
alpha = trial_deform * mass / HBAR # ps/nm^2
omega_R = omega_0 # 1/ps
omega_r = np.sqrt(omega_0 * omega_0 + 2 * omega_int * omega_int) # 1/ps
omega_p = omega_R + omega_r # 1/ps
omega_m = omega_R - omega_r # 1/ps
def trial_f(qs: '[nm]') -> '1':
return np.exp(-0.25 * alpha * (omega_p * (qs[..., 0] ** 2 + qs[..., 1] ** 2) + 2 * omega_m * qs[..., 0] * qs[..., 1]))
def trial_f_diff_0(qs: '[nm]') -> '1/nm^2':
return 0.5 * alpha * (0.5 * alpha * (omega_p * qs[..., 0] + omega_m * qs[..., 1]) ** 2 - omega_p) * trial_f(qs)
def trial_f_diff_1(qs: '[nm]') -> '1/nm^2':
return 0.5 * alpha * (0.5 * alpha * (omega_m * qs[..., 0] + omega_p * qs[..., 1]) ** 2 - omega_p) * trial_f(qs)
kwargs['trial_f'] = trial_f
kwargs['trial_f_diffs'] = [trial_f_diff_0, trial_f_diff_1]
ho_pigs = PIGSIMM([mass, mass], [grid_range, grid_range], [grid_len, grid_len], total_potential, beta, num_links, **kwargs)
estimated_potential_energy = ho_pigs.expectation_value(total_potential) / KB # K
estimated_total_energy = ho_pigs.energy_mixed / KB # K
estimated_trace = ho_pigs.trace_renyi2
print('V = {} K'.format(estimated_potential_energy))
print('E_mixed = {} K'.format(estimated_total_energy))
print('trace = {}'.format(estimated_trace))
# Output wavefunction.
if wf_out:
np.savetxt(wf_out, np.hstack((ho_pigs.grid, ho_pigs.ground_wf[:, np.newaxis])))
# Output plot.
if density_diagonal_out:
from pathintmatmult.plotting import plot2d
xy_range = (-grid_range, grid_range)
density = ho_pigs.density_diagonal.reshape(grid_len, grid_len)
plot2d(density, xy_range, xy_range, density_diagonal_out, x_label=r'$q_2 / \mathrm{nm}$', y_label=r'$q_1 / \mathrm{nm}$')
--- FILE SEPARATOR ---
from .nmm import PIFTMM, PIGSIMM, PIGSMM
--- FILE SEPARATOR ---
"""
Numerical matrix multiplication for path integrals.
"""
from itertools import product
import numpy as np
from .constants import HBAR
from .tools import cached
class PIMM:
"""
Path Integrals via Matrix Multiplication
Base class for various kinds of path integral implementations.
"""
def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
beta: 'mol/kJ', num_links: '1'):
"""
Note:
When pot_f receives an N-dimensional array as input, it needs to map
over it, returning an (N-1)-dimensional array.
Note:
The "particles" are actually any Cartesian degrees of freedom. One
might have the same configuration (masses and grids) for a
3-dimensional 1-particle system as for a 1-dimensional 3-particle
system. Of course, the coordinate arrays must be interpreted
appropriately in each case (whether by the potential function or by
the user of the output density).
Parameters:
masses: Masses of the particles.
grid_ranges: Where the grids are truncated. Each grid is symmetric
about the origin.
grid_lens: How many points are on the grids.
beta: Propagation length of the entire path.
num_links: Number of links in the entire path.
pot_f: Potential experienced by the particles in some spatial
configuration.
"""
assert len(masses) == len(grid_ranges) == len(grid_lens), \
'Numbers of configuration items must match.'
assert all(m > 0 for m in masses), 'Masses must be positive.'
assert all(gr > 0 for gr in grid_ranges), 'Grids must have positive lengths.'
assert all(gl >= 2 for gl in grid_lens), 'Grids must have at least two points.'
assert beta > 0, 'Beta must be positive.'
assert num_links >= 2, 'Must have at least two links.'
self._masses = np.array(masses)
self._grid_ranges = np.array(grid_ranges)
self._grid_lens = np.array(grid_lens)
self._pot_f = pot_f
self._beta = beta
self._num_links = num_links
# For cached decorator.
self._cached = {}
@property
def masses(self) -> '[g/mol]':
return self._masses
@property
def grid_ranges(self) -> '[nm]':
return self._grid_ranges
@property
def grid_lens(self) -> '[1]':
return self._grid_lens
@property
def pot_f(self) -> '[nm] -> kJ/mol':
return self._pot_f
@property
def beta(self) -> 'mol/kJ':
return self._beta
@property
def num_links(self) -> '1':
return self._num_links
@property
@cached
def tau(self) -> 'mol/kJ':
"""
High-temperature propagator length.
"""
return self.beta / self.num_links
@property
@cached
def num_points(self) -> '1':
"""
Number of points in the coordinate vector.
"""
return np.prod(self.grid_lens)
@property
@cached
def grid(self) -> '[[nm]]':
"""
Vector of the positions corresponding to the grid points.
This is not a vector in the sense of a 1-dimensional array, because
each element is itself a vector of coordinates for each particle.
However, it can be thought of as the tensor product of the
1-dimensional position vectors.
"""
grids = [np.linspace(-gr, gr, gl) for (gr, gl) in zip(self.grid_ranges, self.grid_lens)]
result = np.array(list(product(*grids)))
assert result.shape == (self.num_points, len(self.masses))
return result
@property
@cached
def volume_element(self) -> 'nm^N':
"""
Effective volume taken up by each grid point.
"""
return np.prod(2 * self.grid_ranges / (self.grid_lens - 1))
@property
@cached
def pot_f_grid(self) -> '[kJ/mol]':
"""
Potential function evaluated on the grid.
"""
return self.pot_f(self.grid)
@property
@cached
def rho_tau(self) -> '[[1/nm^N]]':
"""
Matrix for the high-temperature propagator.
"""
prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau) # [1/nm^2]
prefactor_V = self.tau / 2 # mol/kJ
prefactor_front = np.sqrt(np.prod(prefactors_K) / np.pi) # 1/nm^N
K = np.empty((self.num_points, self.num_points)) # [[nm^2]]
V = np.empty_like(K) # [[kJ/mol]]
for i, q_i in enumerate(self.grid):
for j, q_j in enumerate(self.grid):
K[i, j] = np.sum(prefactors_K * (q_i - q_j) ** 2)
V[i, j] = self.pot_f_grid[i] + self.pot_f_grid[j]
return prefactor_front * np.exp(-K - prefactor_V * V)
@property
def density_diagonal(self):
raise NotImplementedError()
def expectation_value(self, property_f: '[nm] -> X') -> 'X':
"""
Expectation value of property_f.
Note:
This is only implemented for properties that are diagonal in the
position representation.
Note:
When property_f receives an N-dimensional array as input, it should
behave in the same manner as pot_f.
"""
return np.dot(self.density_diagonal, property_f(self.grid))
class PIFTMM(PIMM):
"""
Path Integral at Finite Temperature via Matrix Multiplication
Calculate the approximate thermal density matrix of a system comprised of
one or more particles in an arbitrary potential on a discretized and
truncated grid. The density matrix is determined via numerical matrix
multiplication of high-temperature matrices.
"""
@property
@cached
def rho_beta(self) -> '[[1/nm^N]]':
"""
Matrix for the full path propagator.
"""
power = self.num_links - 1
eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
return result / self.volume_element
@property
@cached
def density(self) -> '[[1]]':
"""
Normalized thermal density matrix.
"""
density = self.rho_beta
# Explicitly normalize.
density /= density.diagonal().sum()
return density
@property
@cached
def density_diagonal(self) -> '[1]':
"""
Normalized thermal diagonal density.
"""
return self.density.diagonal()
class PIGSMM(PIMM):
"""
Path Integral Ground State via Matrix Multiplication
Calculate the approximate ground state wavefunction of a system comprised
of one or more particles in an arbitrary potential on a discretized and
truncated grid. The wavefunction is determined via imaginary time
propagation from a trial function using numerical matrix multiplication.
"""
def __init__(self, masses: '[g/mol]', grid_ranges: '[nm]',
grid_lens: '[1]', pot_f: '[nm] -> kJ/mol',
beta: 'mol/kJ', num_links: '1', *,
trial_f: '[nm] -> 1' = None,
trial_f_diffs: '[[nm] -> 1/nm^2]' = None):
"""
See PIMM.__init__ for more details.
Note:
The convention used is that beta represents the entire path, so the
propagation length from the trial function to the middle of the path
is beta/2.
Note:
When trial_f receives an N-dimensional array as input, it should
behave in the same manner as pot_f.
Parameters:
trial_f: Approximation to the ground state wavefunction. If none is
provided, a uniform trial function is used.
trial_f_diffs: Second derivatives of trial_f. One function must be
specified for each particle.
"""
super().__init__(masses, grid_ranges, grid_lens, pot_f, beta, num_links)
assert num_links % 2 == 0, 'Number of links must be even.'
if trial_f is not None:
assert trial_f_diffs is not None, 'Derivatives must be provided.'
assert len(trial_f_diffs) == len(masses), 'Number of derivatives must match.'
self._trial_f = trial_f
self._trial_f_diffs = trial_f_diffs
@property
def trial_f(self) -> '[nm] -> 1':
return self._trial_f
@property
def trial_f_diffs(self) -> '[[nm] -> 1/nm^2]':
return self._trial_f_diffs
@property
@cached
def uniform_trial_f_grid(self) -> '[1]':
"""
Unnormalized uniform trial function evaluated on the grid.
"""
return np.ones(self.num_points)
@property
@cached
def trial_f_grid(self) -> '[1]':
"""
Unnormalized trial function evaluated on the grid.
"""
if self.trial_f is None:
# Default to a uniform trial function.
return self.uniform_trial_f_grid
return self.trial_f(self.grid)
@property
@cached
def uniform_trial_f_diffs_grid(self) -> '[[1/nm^2]]':
"""
Unnormalized uniform trial function derivatives evaluated on the grid.
"""
return np.zeros(self.grid.T.shape)
@property
@cached
def trial_f_diffs_grid(self) -> '[[1/nm^2]]':
"""
Unnormalized trial function derivatives evaluated on the grid.
"""
if self.trial_f is None:
# Default to a uniform trial function.
return self.uniform_trial_f_diffs_grid
result = np.empty(self.grid.T.shape)
for i, f in enumerate(self.trial_f_diffs):
result[i] = f(self.grid)
return result
@property
@cached
def rho_beta_half(self) -> '[[1/nm^N]]':
"""
Matrix for the half path propagator.
"""
power = self.num_links // 2
eigvals, eigvecs = np.linalg.eigh(self.volume_element * self.rho_tau)
result = np.dot(np.dot(eigvecs, np.diag(eigvals ** power)), eigvecs.T)
return result / self.volume_element
@property
@cached
def rho_beta(self) -> '[[1/nm^N]]':
"""
Matrix for the full path propagator.
"""
return self.volume_element * np.dot(self.rho_beta_half, self.rho_beta_half)
@property
@cached
def ground_wf(self) -> '[1]':
"""
Normalized ground state wavefunction.
"""
ground_wf = np.dot(self.rho_beta_half, self.trial_f_grid)
# Explicitly normalize.
ground_wf /= np.sqrt(np.sum(ground_wf ** 2))
return ground_wf
@property
@cached
def density(self) -> '[[1]]':
"""
Normalized ground state density matrix.
"""
return np.outer(self.ground_wf, self.ground_wf)
@property
@cached
def density_diagonal(self) -> '[1]':
"""
Normalized ground state diagonal density.
"""
return self.ground_wf ** 2
@property
@cached
def energy_mixed(self) -> 'kJ/mol':
"""
Ground state energy calculated using the mixed estimator.
"""
ground_wf_full = np.dot(self.rho_beta, self.trial_f_grid) # [1/nm^N]
trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0) # [mol/g nm^2]
energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid) # kJ/mol nm^N
energy_K = np.dot(ground_wf_full, trial_f_diffs) # mol/g nm^(N+2)
normalization = np.dot(ground_wf_full, self.trial_f_grid) # 1/nm^N
return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
@property
@cached
def density_reduced(self) -> '[[1]]':
"""
Density matrix for the first particle, with the other traced out.
Only implemented for two-particle systems.
"""
assert len(self.masses) == 2
new_len = self.grid_lens[0]
other_len = self.grid_lens[1]
density_new = np.zeros((new_len, new_len))
for i in range(new_len):
for j in range(new_len):
for t in range(other_len):
# Avoid computing self.density here.
density_new[i, j] += self.ground_wf[other_len * i + t] * self.ground_wf[other_len * j + t]
return density_new
@property
@cached
def trace_renyi2(self) -> '1':
"""
Trace of the square of the reduced density matrix.
The 2nd Rényi entropy is the negative logarithm of this quantity.
"""
return np.linalg.matrix_power(self.density_reduced, 2).trace()
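    # For reference (not part of the original API): given a PIGSMM/PIGSIMM instance
    # `pigs`, the 2nd Renyi entropy itself would be computed as
    #   S_2 = -np.log(pigs.trace_renyi2)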
class PIGSIMM(PIGSMM):
"""
Path Integral Ground State via Implicit Matrix Multiplication
Calculate the approximate ground state wavefunction of a system comprised
of one or more particles in an arbitrary potential on a discretized and
truncated grid. The wavefunction is determined via imaginary time
propagation from a trial function using implicit numerical matrix-vector
multiplication, where the full density matrix is never constructed.
"""
@property
def rho_tau(self):
# We don't build any (full) matrices!
raise NotImplementedError()
@property
def rho_beta_half(self):
raise NotImplementedError()
@property
def rho_beta(self):
raise NotImplementedError()
def _propagate_trial(self, start_grid: '[1]', power: '1') -> '[1]':
"""
Multiply start_grid by (rho_tau ** power).
"""
prefactors_K = self.masses / (2 * HBAR * HBAR * self.tau) # [1/nm^2]
pot_exp = np.exp(-0.5 * self.tau * self.pot_f_grid) # [1]
temp_wf1 = start_grid.copy() # [1]
temp_wf2 = np.zeros_like(temp_wf1) # [1]
for _ in range(power):
temp_wf1 *= pot_exp
for q, wf in zip(self.grid, temp_wf1):
# The temporary array here is the same shape as self.grid.
temp_wf2 += np.exp(-np.sum(prefactors_K * (self.grid - q) ** 2, axis=1)) * wf
temp_wf2 *= pot_exp
# Explicitly normalize at each step for stability.
temp_wf1 = temp_wf2 / np.sqrt(np.sum(temp_wf2 ** 2))
temp_wf2 = np.zeros_like(temp_wf1)
return temp_wf1
@property
@cached
def ground_wf(self) -> '[1]':
"""
Normalized ground state wavefunction.
"""
return self._propagate_trial(self.trial_f_grid, self.num_links // 2)
@property
def density(self):
raise NotImplementedError()
@property
@cached
def energy_mixed(self) -> 'kJ/mol':
"""
Ground state energy calculated using the mixed estimator.
"""
ground_wf_full = self._propagate_trial(self.ground_wf, self.num_links // 2) # [1]
trial_f_diffs = np.sum(self.trial_f_diffs_grid / self.masses[:, np.newaxis], axis=0) # [mol/g nm^2]
energy_V = np.sum(ground_wf_full * self.pot_f_grid * self.trial_f_grid) # kJ/mol
energy_K = np.dot(ground_wf_full, trial_f_diffs) # mol/g nm^2
normalization = np.dot(ground_wf_full, self.trial_f_grid) # 1
return (energy_V - 0.5 * HBAR * HBAR * energy_K) / normalization
--- FILE SEPARATOR ---
"""
Convenience functions for plotting the generated data.
"""
import matplotlib.pyplot as plt
def plot2d(data: '[[X]]', x_range, y_range, out_path, *, x_label=None, y_label=None, colormap='jet', colorbar=True):
"""
Plot the data as a heat map.
The resulting image is saved to out_path.
Parameters:
data: Two-dimensional array of numbers to plot.
x_range: Tuple containing the min and max values for the x axis.
y_range: Tuple containing the min and max values for the y axis.
out_path: The path to the file where the image should be written. The
extension determines the image format (e.g. pdf, png).
x_label: Label for the x axis.
y_label: Label for the y axis.
colormap: matplotlib colormap to use for the image.
colorbar: Whether to display the colorbar.
"""
fig = plt.figure()
ax = fig.gca()
img = ax.imshow(data, cmap=colormap, origin='lower', extent=(x_range + y_range))
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
if colorbar:
fig.colorbar(img, drawedges=False)
fig.savefig(out_path, bbox_inches='tight', transparent=True)
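# Illustrative usage sketch (the data, ranges, and output path below are hypothetical):
#
#     import numpy as np
#     data = np.random.rand(50, 50)
#     plot2d(data, (-1.0, 1.0), (-1.0, 1.0), 'density.png',
#            x_label=r'$x / \mathrm{nm}$', y_label=r'$y / \mathrm{nm}$')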
--- FILE SEPARATOR ---
"""
Example potential functions.
"""
import numpy as np
def free_particle_potential() -> 'nm -> kJ/mol':
"""
Free particle potential.
"""
def free_particle(q: 'nm') -> 'kJ/mol':
# Remove the inner-most dimension.
return np.zeros(q.shape[:-1])
return free_particle
def harmonic_potential(k: 'kJ/mol nm^2' = None, m: 'g/mol' = None, w: '1/ps' = None) -> 'nm -> kJ/mol':
"""
Harmonic potential relative to the origin.
Note:
Either k or (m and w) must be specified.
Parameters:
k: Spring constant.
m: Mass of particle.
w: Angular frequency of oscillator.
"""
if k is not None:
force_constant = k # kJ/mol nm^2
elif m is not None and w is not None:
force_constant = m * w * w # kJ/mol nm^2
else:
assert False, 'Must provide either k or (m and w).'
def harmonic(q: 'nm') -> 'kJ/mol':
return force_constant * q[..., 0] * q[..., 0] / 2
return harmonic
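# Illustrative usage sketch (the numbers below are arbitrary):
#
#     import numpy as np
#     pot = harmonic_potential(m=1.0, w=2.0)          # g/mol and 1/ps, per the annotations above
#     qs = np.linspace(-1.0, 1.0, 5)[:, np.newaxis]   # (5, 1) array of positions in nm
#     print(pot(qs))                                  # potential energy in kJ/mol at each point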
--- FILE SEPARATOR ---
"""
Assorted tools.
"""
from functools import wraps
def cached(f):
"""
A simple cache for constant instance methods.
Requires a _cached dict on the instance.
"""
@wraps(f)
def wrapped(self, *args, **kwargs):
if f not in self._cached:
self._cached[f] = f(self, *args, **kwargs)
return self._cached[f]
return wrapped
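# Minimal usage sketch (the class and attribute names below are hypothetical):
#
#     class Example:
#         def __init__(self):
#             self._cached = {}  # required by the cached decorator
#
#         @property
#         @cached
#         def expensive_value(self):
#             return sum(range(10 ** 6))  # computed once, then served from _cached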
|
[
"/examples/pift_harmonic_oscillator.py",
"/examples/pigs_harmonic_oscillator.py",
"/examples/pigs_harmonic_oscillator_entangled.py",
"/pathintmatmult/__init__.py",
"/pathintmatmult/nmm.py",
"/pathintmatmult/plotting.py",
"/pathintmatmult/potentials.py",
"/pathintmatmult/tools.py"
] |
00-00-00-11/Discord-S.C.U.M
|
import inspect
class LogLevel:
INFO = '\033[94m'
OK = '\033[92m'
WARNING = '\033[93m'
DEFAULT = '\033[m'
class Logger:
@staticmethod
def LogMessage(msg, hex_data='', to_file=False, to_console=True, log_level=LogLevel.INFO): #to_file was acting a bit buggy so I decided to remove it altogether for now
stack = inspect.stack()
function_name = "({}->{})".format(str(stack[1][0].f_locals['self']).split(' ')[0], stack[1][3])
if to_console is True:
if hex_data != '':
print('{} {}'.format(log_level, " ".join(format(ord(h), '02x') for h in hex_data)))  # hex-dump each character; str.encode('hex') only existed on Python 2
else:
print('{} [+] {} {}'.format(log_level, function_name, msg))
print(LogLevel.DEFAULT) # restore console color
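# Illustrative call (the message text is arbitrary). LogMessage inspects the caller's
# stack frame for a 'self' local, so it is intended to be called from inside an
# instance method, as the Login class does:
#
#     Logger.LogMessage('Connecting...', log_level=LogLevel.OK)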
--- FILE SEPARATOR ---
from .discum import *
from .gateway.gateway import *
from .Logger import *
from .login.Login import *
--- FILE SEPARATOR ---
from .guild.guild import Guild
from .messages.messages import Messages
from .messages.embed import Embedder
from .user.user import User
from .login.Login import *
from .gateway.gateway import *
import requests  # used directly for the HTTP session below (previously only available via the star imports above)
import time
import random
import re
import user_agents
class SessionSettingsError(Exception):
pass
class Client:
def __init__(self, email="none", password="none", token="none", proxy_host=None, proxy_port=None, user_agent="random", log=True): #not using None on email, pass, and token since that could get flagged by discord...
self.log = log
self.__user_token = token
self.__user_email = email
self.__user_password = password
self.__proxy_host = None if proxy_host in (None,False) else proxy_host
self.__proxy_port = None if proxy_port in (None,False) else proxy_port
self.session_settings = [] #consists of 2 parts, READY and READY_SUPPLEMENTAL
self.discord = 'https://discord.com/api/v8/'
self.websocketurl = 'wss://gateway.discord.gg/?encoding=json&v=8'
if user_agent != "random":
self.__user_agent = user_agent
else:
from random_user_agent.user_agent import UserAgent #only really want to import this if needed...which is why it's down here
self.__user_agent = UserAgent(limit=100).get_random_user_agent()
if self.log: print('Randomly generated user agent: '+self.__user_agent)
parseduseragent = user_agents.parse(self.__user_agent)
self.ua_data = {'os':parseduseragent.os.family,'browser':parseduseragent.browser.family,'device':parseduseragent.device.family if parseduseragent.is_mobile else '','browser_user_agent':self.__user_agent,'browser_version':parseduseragent.browser.version_string,'os_version':parseduseragent.os.version_string}
if self.__user_token in ("none",None,False): #assuming email and pass are given...
self.__login = Login(self.discord,self.__user_email,self.__user_password,self.__user_agent,self.__proxy_host,self.__proxy_port,self.log)
self.__user_token = self.__login.GetToken() #update token from "none" to true string value
time.sleep(1)
self.headers = {
"Host": "discord.com",
"User-Agent": self.__user_agent,
"Accept": "*/*",
"Accept-Language": "en-US",
"Authorization": self.__user_token,
"Connection": "keep-alive",
"keep-alive" : "timeout=10, max=1000",
"TE": "Trailers",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"Referer": "https://discord.com/channels/@me",
"Content-Type": "application/json"
}
self.s = requests.Session()
self.s.headers.update(self.headers)
if self.__proxy_host != None: #self.s.proxies defaults to {}
self.proxies = {
'http': self.__proxy_host+':'+self.__proxy_port,
'https': self.__proxy_host+':'+self.__proxy_port
}
self.s.proxies.update(self.proxies)
if self.log: print("Retrieving Discord's build number...")
discord_login_page_exploration = self.s.get('https://discord.com/login').text
time.sleep(1)
try: #getting the build num is kinda experimental since who knows if discord will change where the build number is located...
file_with_build_num = 'https://discord.com/assets/'+re.compile(r'assets/+([a-z0-9]+)\.js').findall(discord_login_page_exploration)[-2]+'.js' #fastest solution I could find since the last js file is huge in comparison to 2nd from last
req_file_build = self.s.get(file_with_build_num).text
index_of_build_num = req_file_build.find('buildNumber')+14
self.discord_build_num = int(req_file_build[index_of_build_num:index_of_build_num+5])
self.ua_data['build_num'] = self.discord_build_num #putting this onto ua_data since getting the build num won't necessarily work
if self.log: print('Discord is currently on build number '+str(self.discord_build_num))
except:
if self.log: print('Could not retrieve discord build number.')
self.gateway = GatewayServer(self.websocketurl, self.__user_token, self.ua_data, self.__proxy_host, self.__proxy_port, self.log)
'''
test connection (this function was originally in discum and was created by Merubokkusu)
'''
def connectionTest(self): #,proxy):
url=self.discord+'users/@me/affinities/users'
connection = self.s.get(url)
if(connection.status_code == 200):
if self.log: print("Connected")
else:
if self.log: print("Incorrect Token")
return connection
'''
discord snowflake to unix timestamp and back
'''
def snowflake_to_unixts(self,snowflake):
return int((snowflake/4194304+1420070400000)/1000)
def unixts_to_snowflake(self,unixts):
return int((unixts*1000-1420070400000)*4194304)
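    # The constants above are the Discord epoch (2015-01-01 UTC = 1420070400000 ms)
    # and the 22-bit shift (2**22 = 4194304) used in snowflake IDs. Illustrative
    # round trip, where `bot` is a hypothetical Client instance and the snowflake
    # value is arbitrary:
    #   ts = bot.snowflake_to_unixts(175928847299117063)
    #   bot.unixts_to_snowflake(ts)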
'''
Messages
'''
#create DM
def createDM(self,recipients):
return Messages(self.discord,self.s,self.log).createDM(recipients)
#get recent messages
def getMessages(self,channelID,num=1,beforeDate=None,aroundMessage=None): # num <= 100, beforeDate is a snowflake
return Messages(self.discord,self.s,self.log).getMessages(channelID,num,beforeDate,aroundMessage)
#send text or embed messages
def sendMessage(self,channelID,message,embed="",tts=False):
return Messages(self.discord,self.s,self.log).sendMessage(channelID,message,embed,tts)
#send files (local or link)
def sendFile(self,channelID,filelocation,isurl=False,message=""):
return Messages(self.discord,self.s,self.log).sendFile(channelID,filelocation,isurl,message)
#search messages
def searchMessages(self,guildID,channelID=None,userID=None,mentionsUserID=None,has=None,beforeDate=None,afterDate=None,textSearch=None,afterNumResults=None):
return Messages(self.discord,self.s,self.log).searchMessages(guildID,channelID,userID,mentionsUserID,has,beforeDate,afterDate,textSearch,afterNumResults)
#filter searchMessages, takes in the output of searchMessages (a requests response object) and outputs a list of target messages
def filterSearchResults(self,searchResponse):
return Messages(self.discord,self.s,self.log).filterSearchResults(searchResponse)
#sends the typing action for 10 seconds (or technically until you change the page)
def typingAction(self,channelID):
return Messages(self.discord,self.s,self.log).typingAction(channelID)
#delete message
def deleteMessage(self,channelID,messageID):
return Messages(self.discord,self.s,self.log).deleteMessage(channelID,messageID)
#edit message
def editMessage(self,channelID,messageID,newMessage):
return Messages(self.discord,self.s,self.log).editMessage(channelID, messageID, newMessage)
#pin message
def pinMessage(self,channelID,messageID):
return Messages(self.discord,self.s,self.log).pinMessage(channelID,messageID)
#un-pin message
def unPinMessage(self,channelID,messageID):
return Messages(self.discord,self.s,self.log).unPinMessage(channelID,messageID)
#get pinned messages
def getPins(self,channelID):
return Messages(self.discord,self.s,self.log).getPins(channelID)
#add reaction
def addReaction(self,channelID,messageID,emoji):
return Messages(self.discord,self.s,self.log).addReaction(channelID,messageID,emoji)
#remove reaction
def removeReaction(self,channelID,messageID,emoji):
return Messages(self.discord,self.s,self.log).removeReaction(channelID,messageID,emoji)
#acknowledge message (mark message read)
def ackMessage(self,channelID,messageID,ackToken=None):
return Messages(self.discord,self.s,self.log).ackMessage(channelID,messageID,ackToken)
#unacknowledge message (mark message unread)
def unAckMessage(self,channelID,messageID,numMentions=0):
return Messages(self.discord,self.s,self.log).unAckMessage(channelID,messageID,numMentions)
'''
User relationships
'''
#create outgoing friend request
def requestFriend(self,user): #you can input a userID(snowflake) or a user discriminator
return User(self.discord,self.s,self.log).requestFriend(user)
#accept incoming friend request
def acceptFriend(self,userID):
return User(self.discord,self.s,self.log).acceptFriend(userID)
#remove friend OR unblock user
def removeRelationship(self,userID):
return User(self.discord,self.s,self.log).removeRelationship(userID)
#block user
def blockUser(self,userID):
return User(self.discord,self.s,self.log).blockUser(userID)
'''
Profile edits
'''
# change name
def changeName(self,name):
return User(self.discord,self.s,self.log).changeName(self.__user_email,self.__user_password,name)
# set status
def setStatus(self,status):
return User(self.discord,self.s,self.log).setStatus(status)
# set avatar
def setAvatar(self,imagePath):
return User(self.discord,self.s,self.log).setAvatar(self.__user_email,self.__user_password,imagePath)
'''
Guild/Server stuff
'''
#get guild info from invite code
def getInfoFromInviteCode(self,inviteCode):
return Guild(self.discord,self.s,self.log).getInfoFromInviteCode(inviteCode)
#join guild with invite code
def joinGuild(self,inviteCode):
return Guild(self.discord,self.s,self.log).joinGuild(inviteCode)
#kick a user
def kick(self,guildID,userID,reason=""):
return Guild(self.discord,self.s,self.log).kick(guildID,userID,reason)
#ban a user
def ban(self,guildID,userID,deleteMessagesDays=0,reason=""):
return Guild(self.discord,self.s,self.log).ban(guildID,userID,deleteMessagesDays,reason)
#look up a user in a guild
def getGuildMember(self,guildID,userID):
return Guild(self.discord,self.s,self.log).getGuildMember(guildID,userID)
--- FILE SEPARATOR ---
from .gateway import *
from .sessionsettings import *
--- FILE SEPARATOR ---
import websocket
import json
import time
import random
import base64
if __import__('sys').version.split(' ')[0] < '3.0.0':
import thread
else:
import _thread as thread
from .sessionsettings import SessionSettings
class GatewayServer:
class LogLevel:
SEND = '\033[94m'
RECEIVE = '\033[92m'
WARNING = '\033[93m'
DEFAULT = '\033[m'
class OPCODE: #https://discordapp.com/developers/docs/topics/opcodes-and-status-codes
# Name Code Client Action Description
DISPATCH = 0 # Receive dispatches an event
HEARTBEAT = 1 # Send/Receive used for ping checking
IDENTIFY = 2 # Send used for client handshake
STATUS_UPDATE = 3 # Send used to update the client status
VOICE_UPDATE = 4 # Send used to join/move/leave voice channels
# 5 # ??? ???
RESUME = 6 # Send used to resume a closed connection
RECONNECT = 7 # Receive used to tell clients to reconnect to the gateway
REQUEST_GUILD_MEMBERS = 8 # Send used to request guild members
INVALID_SESSION = 9 # Receive used to notify client they have an invalid session id
HELLO = 10 # Receive sent immediately after connecting, contains heartbeat and server debug information
HEARTBEAT_ACK = 11 # Sent immediately following a client heartbeat that was received
GUILD_SYNC = 12 #
def __init__(self, websocketurl, token, ua_data, proxy_host=None, proxy_port=None, log=True):
self.token = token
self.ua_data = ua_data
self.auth = {
"token": self.token,
"capabilities": 61,
"properties": {
"os": self.ua_data["os"],
"browser": self.ua_data["browser"],
"device": self.ua_data["device"],
"browser_user_agent": self.ua_data["browser_user_agent"],
"browser_version": self.ua_data["browser_version"],
"os_version": self.ua_data["os_version"],
"referrer": "",
"referring_domain": "",
"referrer_current": "",
"referring_domain_current": "",
"release_channel": "stable",
"client_build_number": 71420,
"client_event_source": None
},
"presence": {
"status": "online",
"since": 0,
"activities": [],
"afk": False
},
"compress": False,
"client_state": {
"guild_hashes": {},
"highest_last_message_id": "0",
"read_state_version": 0,
"user_guild_settings_version": -1
}
}
if 'build_num' in self.ua_data and self.ua_data['build_num']!=71420:
self.auth['properties']['client_build_number'] = self.ua_data['build_num']
self.proxy_host = None if proxy_host in (None,False) else proxy_host
self.proxy_port = None if proxy_port in (None,False) else proxy_port
self.log = log
self.interval = None
self.session_id = None
self.sequence = 0
self.READY = False #becomes True once READY_SUPPLEMENTAL is received
self.settings_ready = {}
self.settings_ready_supp = {}
#websocket.enableTrace(True)
self.ws = self._get_ws_app(websocketurl)
self._after_message_hooks = []
self._last_err = None
self.connected = False
self.resumable = False
self.voice_data = {} #voice connections dependent on current (connected) session
#WebSocketApp, more info here: https://github.com/websocket-client/websocket-client/blob/master/websocket/_app.py#L79
def _get_ws_app(self, websocketurl):
sec_websocket_key = base64.b64encode(bytes(random.getrandbits(8) for _ in range(16))).decode() #https://websockets.readthedocs.io/en/stable/_modules/websockets/handshake.html
headers = {
"Host": "gateway.discord.gg",
"Connection": "Upgrade",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
"User-Agent": self.ua_data["browser_user_agent"],
"Upgrade": "websocket",
"Origin": "https://discord.com",
"Sec-WebSocket-Version": "13",
"Accept-Language": "en-US",
"Sec-WebSocket-Key": sec_websocket_key
} #more info: https://stackoverflow.com/a/40675547
ws = websocket.WebSocketApp(websocketurl,
header = headers,
on_open=lambda ws: self.on_open(ws),
on_message=lambda ws, msg: self.on_message(ws, msg),
on_error=lambda ws, msg: self.on_error(ws, msg),
on_close=lambda ws: self.on_close(ws)
)
return ws
def on_open(self, ws):
self.connected = True
if self.log: print("Connected to websocket.")
if not self.resumable:
self.send({"op": self.OPCODE.IDENTIFY, "d": self.auth})
else:
self.resumable = False
self.send({"op": self.OPCODE.RESUME, "d": {"token": self.token, "session_id": self.session_id, "seq": self.sequence-1 if self.sequence>0 else self.sequence}})
def on_message(self, ws, message):
self.sequence += 1
resp = json.loads(message)
if self.log: print('%s< %s%s' % (self.LogLevel.RECEIVE, resp, self.LogLevel.DEFAULT))
if resp['op'] == self.OPCODE.HELLO: #only happens once, first message sent to client
self.interval = (resp["d"]["heartbeat_interval"]-2000)/1000
thread.start_new_thread(self._heartbeat, ())
elif resp['op'] == self.OPCODE.INVALID_SESSION:
if self.log: print("Invalid session.")
if self.resumable:
self.resumable = False
self.sequence = 0
self.close()
else:
self.sequence = 0
self.close()
if self.interval == None:
if self.log: print("Identify failed.")
self.close()
if resp['t'] == "READY":
self.session_id = resp['d']['session_id']
self.settings_ready = resp['d']
elif resp['t'] == "READY_SUPPLEMENTAL":
self.resumable = True #completely successful identify
self.settings_ready_supp = resp['d']
self.SessionSettings = SessionSettings(self.settings_ready, self.settings_ready_supp)
self.READY = True
elif resp['t'] in ("VOICE_SERVER_UPDATE", "VOICE_STATE_UPDATE"):
self.voice_data.update(resp['d']) #called twice, resulting in a dictionary with 12 keys
thread.start_new_thread(self._response_loop, (resp,))
def on_error(self, ws, error):
if self.log: print('%s%s%s' % (self.LogLevel.WARNING, error, self.LogLevel.DEFAULT))
self._last_err = error
def on_close(self, ws):
self.connected = False
self.READY = False #reset self.READY
if self.log: print('websocket closed')
#Discord needs heartbeats, or else connection will sever
def _heartbeat(self):
if self.log: print("entering heartbeat")
while self.connected:
time.sleep(self.interval)
if not self.connected:
break
self.send({"op": self.OPCODE.HEARTBEAT,"d": self.sequence-1 if self.sequence>0 else self.sequence})
#just a wrapper for ws.send
def send(self, payload):
if self.log: print('%s> %s%s' % (self.LogLevel.SEND, payload, self.LogLevel.DEFAULT))
self.ws.send(json.dumps(payload))
def close(self):
self.connected = False
self.READY = False #reset self.READY
if self.log: print('websocket closed') #sometimes this message will print twice. Don't worry, that's not an error.
self.ws.close()
#the next 2 functions come from https://github.com/scrubjay55/Reddit_ChatBot_Python/blob/master/Reddit_ChatBot_Python/Utils/WebSockClient.py (Apache License 2.0)
def command(self, func):
self._after_message_hooks.append(func)
return func
def _response_loop(self, resp):
for func in self._after_message_hooks:
if func(resp):
break
def removeCommand(self, func):
try:
self._after_message_hooks.remove(func)
except ValueError:
if self.log: print('%s not found in _after_message_hooks.' % func)
pass
def clearCommands(self):
self._after_message_hooks = []
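    # Illustrative hook registration (the handler name and check are hypothetical):
    #
    #     @gateway_server.command
    #     def on_any_event(resp):
    #         if resp.get('t') == 'MESSAGE_CREATE':
    #             print(resp['d']['content'])
    #
    # _response_loop calls each registered hook in order for every gateway message;
    # a hook returning a truthy value stops the remaining hooks from running.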
def resetSession(self): #just resets some variables which, in turn, reset the session (client side). Do not run this while running run().
self.interval = None
self.session_id = None
self.sequence = 0
self.READY = False #becomes True once READY_SUPPLEMENTAL is received
self.settings_ready = {}
self.settings_ready_supp = {}
self._last_err = None
self.voice_data = {}
self.resumable = False #you can't resume anyways without session_id and sequence
#modified version of function run_4ever from https://github.com/scrubjay55/Reddit_ChatBot_Python/blob/master/Reddit_ChatBot_Python/Utils/WebSockClient.py (Apache License 2.0)
def run(self, auto_reconnect=True):
while auto_reconnect:
self.ws.run_forever(ping_interval=10, ping_timeout=5, http_proxy_host=self.proxy_host, http_proxy_port=self.proxy_port)
if isinstance(self._last_err, websocket._exceptions.WebSocketAddressException) or isinstance(self._last_err, websocket._exceptions.WebSocketTimeoutException):
if self.resumable:
waitTime = random.randrange(1,6)
if self.log: print("Connection Dropped. Attempting to resume last valid session in %s seconds." % waitTime)
time.sleep(waitTime)
else:
if self.log: print("Connection Dropped. Retrying in 10 seconds.")
time.sleep(10)
continue
elif not self.resumable: #this happens if you send an IDENTIFY but discord says INVALID_SESSION in response
if self.log: print("Connection Dropped. Retrying in 10 seconds.")
time.sleep(10)
continue
else:
self.resumable = True
return 0
if not auto_reconnect:
self.ws.run_forever(ping_interval=10, ping_timeout=5, http_proxy_host=self.proxy_host, http_proxy_port=self.proxy_port)
--- FILE SEPARATOR ---
from ..Logger import *
import requests
#import requests[socks] #you'll need to pip install requests[socks] (this is only needed if you're using socks proxies)
import json
class Login:
'''
Manages HTTP authentication
'''
def __init__(self, discordurlstart, user_email, user_password,user_agent,proxy_host,proxy_port,log):
self.log = log
self.URL = discordurlstart + "auth/login"
self.__user_email = user_email
self.__user_password = user_password
self.__user_agent = user_agent
self.__proxy_host = proxy_host
self.__proxy_port = proxy_port
self.__token = None
def Connect(self):
session = requests.Session()
if self.__proxy_host not in (None,False):
proxies = {
'http': self.__proxy_host+':'+self.__proxy_port,
'https': self.__proxy_host+':'+self.__proxy_port
}
session.proxies.update(proxies)
session.headers.update({"User-Agent": self.__user_agent})
session.headers.update({'X-Super-Properties': ''})
session.headers.update({"Content-Type": "application/json"})
http_auth_data = '{{"email": "{}", "password": "{}", "undelete": false, "captcha_key": null, "login_source": null, "gift_code_sku_id": null}}'.format(self.__user_email, self.__user_password)
if self.log: Logger.LogMessage('Post -> {}'.format(self.URL))
if self.log: Logger.LogMessage('{}'.format(http_auth_data))
response = session.post(self.URL, data=http_auth_data)
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
self.__token = json.loads(response.content)['token']
def GetToken(self):
if self.__token is None:
self.Connect()
return self.__token
--- FILE SEPARATOR ---
import requests
import json
import base64
from ..Logger import *
class User(object):
def __init__(self, discord, s, log): #s is the requests session object
self.discord = discord
self.s = s
self.log = log
#def getDMs(self): #websockets does this now
# url = self.discord+"users/@me/channels"
# return self.s.get(url)
#def getGuilds(self): #websockets does this now
# url = self.discord+"users/@me/guilds"
# return self.s.get(url)
#def getRelationships(self): #websockets does this now
# url = self.discord+"users/@me/relationships"
# return self.s.get(url)
def requestFriend(self,user):
if "#" in user:
url = self.discord+"users/@me/relationships"
body = {"username": user.split("#")[0], "discriminator": int(user.split("#")[1])}
if self.log: Logger.LogMessage('Post -> {}'.format(url))
if self.log: Logger.LogMessage('{}'.format(str(body)))
response = self.s.post(url, data=json.dumps(body))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
url = self.discord+"users/@me/relationships/"+user
if self.log: Logger.LogMessage('Put -> {}'.format(url))
response = self.s.put(url, data=json.dumps({}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
def acceptFriend(self,userID):
url = self.discord+"users/@me/relationships/"+userID
if self.log: Logger.LogMessage('Put -> {}'.format(url))
response = self.s.put(url, data=json.dumps({}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
def removeRelationship(self,userID): #for removing friends, unblocking people
url = self.discord+"users/@me/relationships/"+userID
if self.log: Logger.LogMessage('Delete -> {}'.format(url))
response = self.s.delete(url)
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
def blockUser(self,userID):
url = self.discord+"users/@me/relationships/"+userID
if self.log: Logger.LogMessage('Put -> {}'.format(url))
if self.log: Logger.LogMessage('{}'.format(str({"type":2})))
response = self.s.put(url, data=json.dumps({"type":2}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
'''
Profile Edits
'''
def changeName(self,email,password,name):
url = self.discord+"users/@me"
if self.log: Logger.LogMessage('Patch -> {}'.format(url))
if self.log: Logger.LogMessage('{}'.format(str({"username":name,"email":email,"password":password})))
response = self.s.patch(url, data=json.dumps({"username":name,"email":email,"password":password}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
def setStatus(self,status):
url = self.discord+"users/@me/settings"
if self.log: Logger.LogMessage('Patch -> {}'.format(url))
if(status == 0): # Online
if self.log: Logger.LogMessage('{}'.format(str({"status":"online"})))
response = self.s.patch(url, data=json.dumps({"status":"online"}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
elif(status == 1): # Idle
if self.log: Logger.LogMessage('{}'.format(str({"status":"idle"})))
response = self.s.patch(url, data=json.dumps({"status":"idle"}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
elif(status == 2): #Do Not Disturb
if self.log: Logger.LogMessage('{}'.format(str({"status":"dnd"})))
response = self.s.patch(url, data=json.dumps({"status":"dnd"}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
elif (status == 3): #Invisible
if self.log: Logger.LogMessage('{}'.format(str({"status":"invisible"})))
response = self.s.patch(url, data=json.dumps({"status":"invisible"}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
elif (status == ''):
if self.log: Logger.LogMessage('{}'.format(str({"custom_status":None})))
response = self.s.patch(url, data=json.dumps({"custom_status":None}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
else:
if self.log: Logger.LogMessage('{}'.format(str({"custom_status":{"text":status}})))
response = self.s.patch(url, data=json.dumps({"custom_status":{"text":status}}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
def setAvatar(self,email,password,imagePath): #local image path
url = self.discord+"users/@me"
if self.log: Logger.LogMessage('Patch -> {}'.format(url))
if self.log: Logger.LogMessage('{}'.format(str({"email":email,"password":password,"avatar":"data:image/png;base64,<encoded image data>","discriminator":None,"new_password":None})))
with open(imagePath, "rb") as image:
encodedImage = base64.b64encode(image.read()).decode('utf-8')
response = self.s.patch(url, data=json.dumps({"email":email,"password":password,"avatar":"data:image/png;base64,"+encodedImage,"discriminator":None,"new_password":None}))
if self.log: Logger.LogMessage('Response <- {}'.format(response.text), log_level=LogLevel.OK)
return response
|
[
"/discum/Logger.py",
"/discum/__init__.py",
"/discum/discum.py",
"/discum/gateway/__init__.py",
"/discum/gateway/gateway.py",
"/discum/login/Login.py",
"/discum/user/user.py"
] |
00-00-00-11/Hummingbird
|
from . import dashboard
from . import home
from . import manage
from . import success
from . import upload
from . import dashboardItem
from . import moreInfoCount
from . import moreInfoGender
from . import moreInfoSalary
from . import moreInfoJobs
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
from lib.dataHandler import *
dashboard = Blueprint('dashboard', __name__,
template_folder='templates')
@dashboard.route('/dashboard')
def show():
return render_template('pages/dashboard.html',
size = 4123,
mfRatio = 51,
meanTc = 251222,
jobCount = 5)
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort, request
from lib.dataHandler import *
import json  # used to load the saved blob below
dashboardItem = Blueprint('dashboardItem', __name__,
template_folder='templates')
@dashboardItem.route('/dashboardItem', methods=['GET','POST'])
def samplefunction():
if (request.method == 'POST'):
print(request.form['fileSub'])
with open("blobs/"+request.form['fileSub']+".json") as json_file:
data = json.load(json_file)
print(data)
num = data['count']
ratio = '%.3f'%data['ratio']
averageComp = data['meanTc']
uniqueJobs = data['jobs']
gend = int(data['p_val_g']*1000)/1000
rac = int(data['p_val_race']*1000)/1000
feedback = data['feedback']
# tValue = data['t value']
# permutations = data['data permutations']
return render_template('pages/dashboardItem.html',
size = num,
mfRatio = ratio,
meanTc = averageComp,
jobCount = uniqueJobs,
p_val_g = gend,
p_val_race = rac,
recommendations = feedback) #,
#tVal = tValue,
#dataPermutations = permutations)
else:
return render_template('pages/dashboardItem.html')
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
home = Blueprint('home', __name__,
template_folder='templates')
@home.route('/')
def show():
return render_template('pages/home.html')
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
import os
manage = Blueprint('manage', __name__,
template_folder='templates')
@manage.route('/manage')
def show():
files = os.listdir('blobs')
for i in range(len(files)):
files[i] = files[i][:-5]
return render_template('pages/manage.html', files = files)
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort, request
from lib.dataHandler import *
moreInfoJobs = Blueprint('moreInfoJobs', __name__,
template_folder='templates')
@moreInfoJobs.route('/moreInfoJobs', methods=['GET','POST'])
def samplefunction():
print(request.form)
# permutations = data['data permutations']
return render_template('/pages/moreInfoJobs.html') #,
#tVal = tValue,
#dataPermutations = permutations)
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort, request
import csvparser
from subprocess import Popen
success = Blueprint('success', __name__,
template_folder='templates')
@success.route('/success', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
f = request.files['file']
f.save('uploads/' + f.filename)
Popen(['python', 'lib/dataHandler.py', 'uploads/'+ f.filename])
return render_template('forms/success.html', name = f.filename)
--- FILE SEPARATOR ---
from flask import Blueprint, render_template, abort
upload = Blueprint('upload', __name__,
template_folder='templates')
@upload.route('/upload')
def show():
return render_template('pages/upload.html')
--- FILE SEPARATOR ---
import csv
import random
from lib import Gender, Job, Race
"""
Generates a CSV file of sample size N.
input:
N- the sample size
Sample_instructions: a dictionary with instructions on how to bias people
{
key- the metric to be unfair about:
value - a dictionary{
key- the group in question:
value- a multiplier that skews the pay, e.g. 1.15 means 15% higher pay
}
}
global_mean- a global average that is the relative comparison for all individual groups
global_std- a global std for all.
"""
def generateCSV(sample_size, sample_instructions, global_mean, global_std):
answer = list(sample_instructions) + ["wage"]
for person in range(sample_size):
person_attributes = []
weighed_mean = global_mean
for discriminating_factor in list(sample_instructions):
factor_types = sample_instructions[discriminating_factor]
selected_attribute = random.choice(list(factor_types))
weighed_mean *=factor_types[selected_attribute]
person_attributes += [selected_attribute]
person_attributes += [int(100*random.gauss(weighed_mean, global_std))/100]
answer.append(person_attributes)
createCSV(answer)
return answer
def createCSV(lists):
with open('sampledata.csv', 'w', newline='') as f:
thewriter = csv.writer(f)
thewriter.writerow(['race', 'gender', 'job', 'year', 'salary'])
for row in lists:
thewriter.writerow(row)
instruction = {
'race' : {
'white': 1.5,
'black': 1,
'asian': 1.3,
'latino': 0.8,
'indigenous': .8,
'pacific': .9,
},
'gender' : {
'male': 1,
'female': 0.73,
},
'job' : {
'Alcohol Beverage Purchasing Specialist': .5,
'deputy sheriff': 1,
'sheriff': 1.5,
'Executive': 10
}
}
for person in generateCSV(1500, instruction, 100000, 10000):
print (person)
--- FILE SEPARATOR ---
import csv
def parseCSV(file_name):
    myList = []
    with open(file_name, 'r') as file_o_data:
        for row in csv.reader(file_o_data):  # csv.reader gives an iterable of rows
            myList.append(row)
    print(myList)
    return myList
# Leftover exploratory snippet, kept here as a comment because it referenced csv_data,
# which only exists inside parseCSV. Rewritten to work on parseCSV output (the file
# name is hypothetical):
#
#     rows = parseCSV('data.csv')
#     processed_data = {'M': [], 'F': []}  # gender: annual salary
#     for datapoint in rows[1:]:  # skip the header row
#         processed_data[datapoint[0]].append(datapoint[1])
#     print("the average male pay is",
#           sum(int(float(i)) for i in processed_data['M']) / len(processed_data['M']))
"""
Takes DATA, an iterable, and sorts the DATA by the
COLUMN_SORT and returns it as a dictionary where each different type
in COLUMN_GROUP has its relevant COLUMN_SORTs listed as a dictionary value.
"""
def sort_by(data, column_sort, column_group ):
assert len(data)>1, "There is no data in the file!"
header, data = data[0], data[1:]
try:
group_ind = header.index(column_group)
sort_ind = header.index(column_sort)
except ValueError:
return "Error: the request is not represented by the data"
sorted_data = {}
for data_point in data:
grouper = data_point[group_ind]
sort_value = data_point[sort_ind]
if grouper not in sorted_data:
sorted_data[grouper] = [sort_value]
else:
sorted_data[grouper] += [sort_value]
return sorted_data
# test_data = [['money', 'race'], [-100, 'white'], [25000, 'asian'], [26000, 'asian'], [1000000, 'egyptian'], [1000, 'white']]
# sorted_test_data = sort_by(test_data, "money", "race")
"""
filter_group takes in a dataset and column to filter by (creating something like a "race-filter",
then takes in a name of the grouped variable (e.g. white))
filtergroup (test_data, race)(white)
>>> [[-100, 'white'], [1000, 'white']]
"""
# filter_group = lambda dataset, col: lambda var: list(filter (lambda row: row[dataset[0].index(col)] == var, dataset))
# print(filter_group(test_data, "race")("asian"))
def mean_data(sorted_data):
    # Average the (possibly string-valued) entries for each group.
    return {grouper: sum(float(v) for v in values) / len(values) for grouper, values in sorted_data.items()}
# print(mean_data(test_data))
"""
Filters a CSV into several Lists, currently supported lists are categories, gender (index 0), annualSalary(index 1), Employee Title (index 2), and race (index 3)
"""
def filterCSV(file_name):
with open(file_name, 'r') as file_o_data:
csv_data = csv.reader(file_o_data) #gives an iterable
categories = []
gender = []
annualSalary = []
race = []
employeeTitle = []
#gender:annual salary
for specData in next(csv_data):
categories.append(specData)
print(categories)
for datapoint in csv_data:
index = 0
for specificData in datapoint:
#print(specificData)
if ("gender" in categories and index == categories.index("gender")):
gender.append(specificData)
elif ("current annual salary" in categories and index == categories.index("current annual salary")):
annualSalary.append(specificData)
elif ("race" in categories and index == categories.index("race")):
race.append(specificData)
if ("employee position title" in categories or "position title" in categories or "job" in categories):
if ("employee position title" in categories):
if (index == categories.index("employee position title")):
employeeTitle.append(specificData)
elif ("position title" in categories):
if (index == categories.index("position title")):
employeeTitle.append(specificData)
elif ("job" in categories):
if (index == categories.index("job")):
employeeTitle.append(specificData)
#elif (index == categories.index("Employee Position Title") or index == categories.index("Position Title")):
# employeeTitle.append(specificData)
index += 1
return [gender, annualSalary, employeeTitle, race]
#gender = 'M' or 'F'
def genderSalaryAVG(arr, seekGender):
gender = arr[0]
annualSalary = arr[1]
if ((seekGender != 'M' and seekGender != 'F') or gender == []):
return
totalAnn = 0
index = 0
count = 0
for data in gender:
if (data.lower() == seekGender.lower() and annualSalary[index] != ''):
totalAnn += float(annualSalary[index])
count += 1
index += 1
print("Average annual salary for gender: "+seekGender+", is "+(str(int(totalAnn/count))))
return (str(int(totalAnn/count)))
def raceAVG(arr, seekRace):
race = arr[3]
annualSalary = arr[1]
if (seekRace == '' or race == [] or annualSalary == []):
return
totalAnn = 0
index = 0
count = 0
for data in race:
if (data.lower() == seekRace.lower() and annualSalary[index] != ''):
totalAnn += float(annualSalary[index])
count += 1
index += 1
print("Average annual salary for race: "+seekRace+", is "+(str(int(totalAnn/count))))
return (str(int(totalAnn/count)))
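# A minimal usage sketch (hypothetical file name "salaries.csv" whose header includes
# "gender", "current annual salary", "race", and a position-title column):
#   columns = filterCSV("salaries.csv")
#   genderSalaryAVG(columns, 'M')
#   raceAVG(columns, "asian")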
--- FILE SEPARATOR ---
from enum import Enum
class DataSections(Enum):
RACE = 0
GENDER = 1
JOB = 2
SENIORITY = 3
SALARY = 4
--- FILE SEPARATOR ---
from enum import Enum
class Gender(Enum):
MALE = 0
FEMALE = 1
--- FILE SEPARATOR ---
from enum import Enum
class Job(Enum):
JANITOR = 0
CASHIER = 1
ENGINEER = 2
EXECUTIVE = 3
--- FILE SEPARATOR ---
# Imports
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
def Learn():
categories = 3
temp = 'generated.csv'
dataset = loadtxt(temp, delimiter=',')
inputs = dataset[:,0:categories]
outputs = dataset[:,categories]
model = Sequential()
model.add(Dense(12, input_dim = categories, activation = 'relu'))
model.add(Dense(8, activation = 'relu'))
model.add(Dense(1, activation = 'sigmoid'))
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
model.fit(inputs, outputs, epochs = 150, batch_size = 10)
# Evaluation
_, accuracy = model.evaluate(inputs, outputs)
print('Accuracy: %.2f' % (accuracy * 100))
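# Note: Learn() assumes 'generated.csv' holds rows of `categories` numeric feature columns
# followed by a single binary label column, since the network ends in a one-unit sigmoid
# trained with binary cross-entropy.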
def main():
print("Learn has been activited! It should do nothing.")
main()
--- FILE SEPARATOR ---
from enum import Enum
class Race(Enum):
WHITE = 0
BLACK = 1
ASIAN = 2
LATINO = 3
INDIGENOUS = 4
PACIFIC = 5
--- FILE SEPARATOR ---
from . import csvTasks
from . import Gender
# from . import Learn
--- FILE SEPARATOR ---
import Gender
Gender = Gender.Gender
import Job
Job = Job.Job
import Race
Race = Race.Race
import DataSections
DataSections = DataSections.DataSections
import disparitySearch
import dataHandler
--- FILE SEPARATOR ---
import csv
import random
import math
instruction = {
'race' : {
0: 1.5, # White
1: .9, # Black
2: 1.2, # Asian
3: 0.8, # Latino
4: .7, # Indigenous
5: .8, # Pacific
},
'gender' : {
0: 1, # Male
1: 0.83, # Female
},
'job' : {
0: .5, # Janitor
1: 1, # Cashier
2: 1.5, # Engineer
3: 10 # Executive
},
'year' : {
0: 0.8, # year band 0
1: 0.9, # year band 1
2: 0.95, # year band 2
3: 1 # year band 3
}
}
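# How generateCSV (below) applies these multipliers: one factor per category is multiplied
# into the global mean salary. For example, with a global mean of 100000, a white (1.5),
# female (0.83) engineer (1.5) in year band 2 (0.95) gets a weighed mean of
# 100000 * 1.5 * 0.83 * 1.5 * 0.95 = 177412.5 before Gaussian noise is added.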
test_instruction = {
'race' : {
0: 1, # White
1: 1, # Black
2: 1, # Asian
3: 1, # Latino
4: 1, # Indigenous
5: 1, # Pacific
},
'gender' : {
0: 1, # Male
1: 1, # Female
},
'job' : {
0: 1, # Janitor
1: 1, # Cashier
2: 1, # Engineer
3: 1 # Executive
},
'year' : {
0: 1, # year band 0
1: 1.2, # year band 1
2: 2, # year band 2
3: 5 # year band 3
}
}
def parse(file):
with open(file, 'r') as data:
csvData = list(csv.reader(data)) # materialise rows before the file handle closes
return csvData
def generateCSV(sample_size, sample_instructions, global_mean, global_std):
answer = []
for person in range(sample_size):
person_attributes = []
weighed_mean = global_mean
for discriminating_factor in list(sample_instructions):
factor_types = sample_instructions[discriminating_factor]
selected_attribute = random.choice(list(factor_types))
weighed_mean *= factor_types[selected_attribute]
person_attributes += [selected_attribute]
person_attributes += [math.floor(abs(int(100*random.gauss(weighed_mean, global_std))/100))]
answer.append(person_attributes)
createCSV(answer)
return answer
def createCSV(lists):
with open('rlyunfairsampledata.csv', 'w', newline='') as f:
thewriter = csv.writer(f)
thewriter.writerow(['race', 'gender', 'job', 'salary'])
for row in lists:
thewriter.writerow(row)
def main():
for person in generateCSV(1500, instruction, 100000, 10000):
print(person)
--- FILE SEPARATOR ---
import csv
import json
import math
import statistics
import sys
from scipy import stats
import numpy as np
import random
sys.path.append('lib')
import Gender
Gender = Gender.Gender
import Job
Job = Job.Job
import Race
Race = Race.Race
import DataSections
DataSections = DataSections.DataSections
def parse(file_name):
data = []
with open(file_name, 'r') as file:
for row in csv.reader(file):
data.append(row)
if "MONT" in file_name:
mapfn = lambda data_entry: [random.randint(0, 5), int(data_entry[1] == "F"), random.randint(0, 3), random.randint(0,6), int(float(data_entry[2]))]
new_data = [datapoint for datapoint in map(mapfn,data[1:])]
return new_data[1:200]
return data[1:]
def splitCols(data):
race = []
gender = []
job = []
year = []
salary = []
for i in data:
race.append(int(i[0]))
gender.append(int(i[1]))
try:
job.append(int(i[2]))
except ValueError:
job.append(i[2])
year.append(int(i[3]))
salary.append(int(i[4]))
return race, gender, job, year, salary
def singleFilter(labels, values, criteria):
"""
singleFilter: filters a list based on the contents of another list
Parameters:
* labels: a list containing the objects you are searching for
* values: a list containing the values you want to return at
the index the label you are searching for is located
* criteria: an object of the same type as the items in labels that will
be compared to objects inside labels
Description:
The function iterates through labels, looking for matches to
criteria. When a match is found, the item located at the same
index in values is added to a new list, which is then returned
after the entire list has been iterated through.
"""
data = []
for i in range(len(labels)):
if criteria == labels[i]:
data.append(values[i])
return data
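# A minimal usage sketch with hypothetical values (0 = male gender labels, salaries as values):
#   singleFilter([0, 1, 0], [50000, 40000, 60000], 0)
#   >>> [50000, 60000]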
def mean(lst):
return sum(lst) / len(lst)
def meanOf(labels, values, criteria):
data = singleFilter(labels, values, criteria)
return sum(data) / len(data)
# Find standard deviation
def sigma(lst):
return statistics.stdev(lst)
# Find standard deviation of criteria
def sigmaOf(labels, values, criteria):
data = singleFilter(labels, values, criteria)
return statistics.stdev(data)
# Returns the percentage of criteria in a list
def ratio(lst, criteria):
data = [x for x in lst if x == criteria]
return len(data) / len(lst)
def unique(lst):
return list(dict.fromkeys(lst))
# Generate a dashboard summary
def dashSum(ppl, job, salary):
return len(ppl), 100*ratio(ppl, Gender.MALE.value), math.floor(mean(salary)), len(unique(job))
def findAllT(race, gender, job, year, salary):
allT = {}
allT['race'] = {}
for r in range(len(Race)):
for i in range(r + 1, len(Race)):
raceListA = singleFilter(race, salary, r)
raceListB = singleFilter(race, salary, i)
allT['race'][(r + 1) * (i + 1)] = stats.ttest_ind(raceListA, raceListB)
allT['gender'] = {}
for g in range(len(Gender)):
for i in range(g + 1, len(Gender)):
genderListA = singleFilter(gender, salary, g)
genderListB = singleFilter(gender, salary, i)
allT['gender'][(g + 1) * (i + 1)] = stats.ttest_ind(genderListA, genderListB)
allT['job'] = {}
for j in range(len(Job)):
for i in range(j + 1, len(Job)):
print(i, j)
jobListA = singleFilter(job, salary, j)
jobListB = singleFilter(job, salary, i)
print (jobListA, jobListB)
print('endtest')
allT['job'][(j + 1) * (i + 1)] = stats.ttest_ind(jobListA, jobListB)
return allT
def pt_score_calc(data1, data2):
c1 = (sigma(data1)**2)/len(data1)
c2 = (sigma(data2)**2)/len(data2)
m1 = mean(data1)
m2 = mean(data2)
denom= math.sqrt(c1+c2)
tVal = (m1-m2)/denom
return tVal
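# pt_score_calc is an unpaired (Welch-style) t statistic:
#   t = (mean1 - mean2) / sqrt(var1/n1 + var2/n2)
# e.g. pt_score_calc([10, 12, 14], [20, 22, 24]) = (12 - 22) / sqrt(4/3 + 4/3) ≈ -6.12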
def search_disparity(data, col, first, second):
data = parse(data)
data = splitCols(data)
data1 = singleFilter(data[col.value], data[DataSections.SALARY.value], first)
if second > -1:
data2 = singleFilter(data[col.value], data[DataSections.SALARY.value], second)
else:
data2 = data[DataSections.SALARY.value]
return pt_score_calc(data1, data2)
"""Takes an interable and finds all possible, non duplicating possible pairs
returns: a list of tuples
"""
def generate_combinations(iterable):
result = []
avoid = []
for iteration in iterable:
for iteration2 in iterable:
if iteration2 not in avoid and iteration2 is not iteration:
result += [(iteration, iteration2)]
avoid += [iteration]
return result
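# A minimal usage sketch: generate_combinations([1, 2, 3])
#   >>> [(1, 2), (1, 3), (2, 3)]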
"""
def complete_data_analysis(datasetURL):
results = {}
#binary gender analysis
results[(Gender.MALE, Gender.FEMALE)] = search_disparity('sampledata.csv', DataSections.GENDER, Gender.MALE.value, Gender.FEMALE.value)
#race analysis
for combination in generate_combinations(Race):
results[combination] = search_disparity(datasetURL, DataSections.RACE, combination[0].value, combination[1].value )
#job analysis
for combination in generate_combinations(Job):
results[combination] = search_disparity(datasetURL, DataSections.JOB, combination[0].value, combination[1].value )
return results
"""
def main():
print("Begun handling of data with", sys.argv)
argumentList = sys.argv[1:]
data = parse(argumentList[0])
# ['race', 'gender', 'job', 'year', 'salary']
race, gender, job, year, salary = splitCols(data)
count, ratio, meanTc, jobs = dashSum(gender, job, salary)
maleSalary = singleFilter(gender, salary, Gender.MALE.value)
maleSalary = sum(maleSalary) / len(maleSalary)
femaleSalary = singleFilter(gender, salary, Gender.FEMALE.value)
femaleSalary = sum(femaleSalary) / len(femaleSalary)
print(maleSalary)
print(femaleSalary)
# t, p = stats.ttest_ind(maleSalary, femaleSalary)
# print("t and p:", t, p)
allT = findAllT(race, gender, job, year, salary)
print(allT)
p_val_g = abs(allT["gender"][2][1])
p_val_race = min(abs(allT['race'][key][1]) for key in allT['race']) # smallest p-value across race pairs
print("p vals", p_val_g, p_val_race)
# tVal = search_disparity(argumentList[0], DataSections.GENDER, Gender.MALE.value, Gender.FEMALE.value)
# comprehensive_data_analysis = complete_data_analysis(argumentList[0])
recommendations = []
if (ratio < 45):
recommendations.append("Your company favors women in the hiring process (by about "+(str2(2*abs(float(50 - ratio))))+"%)! Try to balance out your company!")
elif (ratio > 55):
recommendations.append("Your company favors men in the hiring process (by about "+(str(2*abs(float(50 - ratio))))+"%)! Try to balance out your company!")
else:
recommendations.append("Fantastic job in maintaining a balance of both men and women in your workplace! Keep it up.")
if (jobs < 10):
recommendations.append("Your company is lacking a diverse set of jobs. Try to compartamentalize your employees' duties more!")
elif (jobs >= 10):
recommendations.append("Great job maintaining a diverse set of jobs for your employees!")
if (maleSalary - femaleSalary > 9000):
recommendations.append("Your company has a bias when it comes to paying men over women. (A difference of $"+str(abs(int(femaleSalary - maleSalary)))+") Try to balance out your payrolls!")
elif (femaleSalary - maleSalary > 9000):
recommendations.append("Your company has a bias when it comes to paying women over men. (A difference of $"+str(abs(int(femaleSalary - maleSalary)))+") Try to balance out your payrolls!")
else:
recommendations.append("Great job maintaing balanced and equal payrolls for all of your employees!")
dump = {
"count": count,
"ratio": ratio,
"meanTc": meanTc,
"jobs": jobs,
"t_vals": allT,
"p_val_g": p_val_g,
"p_val_race": p_val_race,
"feedback": recommendations,
# "t value": tVal,
# "permutations": comprehensive_data_analysis,
#"p value": pVal,
}
with open('blobs/' + argumentList[0][7:-3] + "json", 'w') as file:
json.dump(dump, file)
print("[dataHandler] saved!")
if len(sys.argv) > 1:
main()
--- FILE SEPARATOR ---
import csv
from datetime import datetime
import json
import requests
from time import sleep
# url = "https://www.fedsdatacenter.com/federal-pay-rates/output.php?sColumns=,,,,,,,,&iDisplayStart=0&iDisplayLength=100"
url_prepend = "https://www.fedsdatacenter.com/federal-pay-rates/output.php?sColumns=,,,,,,,,&iDisplayStart="
url_append = "&iDisplayLength=100"
payload = {}
headers= {}
today = datetime.today()
date = str(today.year) + "-" + str(today.month) + \
"-" + str(today.day) + "-" + str(today.hour) + str(today.minute)
table = open('FedsDataCenter-' + date + '.csv', 'w', newline='')
writer = csv.writer(table, delimiter=',')
writer.writerow(['name', 'grade', 'plan', 'salary', 'bonus', 'agency', 'location', 'occupation', 'fy'])
start = 12300
end = 21083
pages = 21083
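# Each request returns 100 rows (iDisplayStart = i * 100), so `pages` pages cover roughly
# 2.1 million rows; setting `start` above 0 resumes a partially completed download.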
for i in range(start, end):
print("Downloading page", i + 1, "of", pages,"..." ,end=" ")
url = url_prepend + str(i * 100) + url_append
response = requests.request("GET", url, headers=headers, data = payload)
data = response.text.encode('utf8')
parsed = json.loads(data)
for item in parsed['aaData']:
# print(item)
writer.writerow(item)
print("Done!")
if (i + 1) % 1000 == 0:
print("Sleeping for a half minute...")
sleep(30)
continue
if (i + 1) % 100 == 0:
print("Sleeping for a 5 seconds...")
sleep(5)
continue
# print(response.text.encode('utf8'))
|
[
"/controllers/__init__.py",
"/controllers/dashboard.py",
"/controllers/dashboardItem.py",
"/controllers/home.py",
"/controllers/manage.py",
"/controllers/moreInfoJobs.py",
"/controllers/success.py",
"/controllers/upload.py",
"/csvgenerator.py",
"/csvparser.py",
"/lib/DataSections.py",
"/lib/Gender.py",
"/lib/Job.py",
"/lib/Learn.py",
"/lib/Race.py",
"/lib/__init__.py",
"/lib/completeDataAnalysis.py",
"/lib/csvTasks.py",
"/lib/dataHandler.py",
"/payroll-datasets/scripts/FedsDataCenter.py"
] |
00-00-00-11/News-Suggestions-Using-ML
|
from tqdm import tqdm
import numpy as np
import random, math, time
from scipy.special import psi
from preprocessing import preprocessing, maxItemNum
from retrieve_articles import retrieve_articles
docs, word2id, id2word = preprocessing()
# The number of documents we'll be using to train the model.
N = len(docs)
# number of distinct terms
M = len(word2id)
# number of topics
T = 10
# number of variational inference iterations; convergence checking via the likelihood is omitted
iterInference = 35
# number of variational EM iterations; convergence checking via the likelihood is omitted
iterEM = 50
# initial value of hyperparameter alpha
alpha = 5
# sufficient statistic of alpha
alphaSS = 0
# the topic-word distribution (beta in D. Blei's paper)
# Passing the list [T,M] in as an argument for np.zeros creates a matrix of T-by-M zeros.
varphi = np.zeros([T, M])
# topic-word count, this is a sufficient statistic to calculate varphi
nzw = np.zeros([T, M])
# topic count, sum of nzw with w ranging from [0, M-1], for calculating varphi
nz = np.zeros([T])
# inference parameter gamma
gamma = np.zeros([N, T])
# inference parameter phi
phi = np.zeros([maxItemNum(N, docs), T])
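# In outline: gamma[d] approximates the document-topic posterior for document d and
# phi[w, z] the per-word topic responsibilities; variationalInference below refines both,
# while nzw and nz accumulate the sufficient statistics used to re-estimate varphi in the M step.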
def initializeLdaModel():
for z in range(0, T):
for w in range(0, M):
nzw[z, w] += 1.0/M + random.random()
nz[z] += nzw[z, w]
updateVarphi()
# update model parameters: varphi (the update of alpha is omitted)
def updateVarphi():
for z in range(0, T):
for w in range(0, M):
if(nzw[z, w] > 0):
varphi[z, w] = math.log(nzw[z, w]) - math.log(nz[z])
else:
varphi[z, w] = -100
# update variational parameters : gamma and phi
def variationalInference(docs, d, gamma, phi):
phisum = 0
# Creates a NumPy array of zeros with length equal to the number of topics.
oldphi = np.zeros([T])
digamma_gamma = np.zeros([T])
for z in range(0, T):
gamma[d][z] = alpha + docs[d].wordCount * 1.0 / T
digamma_gamma[z] = psi(gamma[d][z])
for w in range(0, len(docs[d].itemIdList)):
phi[w, z] = 1.0 / T
for iteration in tqdm(range(0, iterInference)):
for w in range(0, len(docs[d].itemIdList)):
phisum = 0
for z in range(0, T):
oldphi[z] = phi[w, z]
phi[w, z] = digamma_gamma[z] + varphi[z, docs[d].itemIdList[w]]
if z > 0:
phisum = math.log(math.exp(phisum) + math.exp(phi[w, z]))
else:
phisum = phi[w, z]
for z in range(0, T):
phi[w, z] = math.exp(phi[w, z] - phisum)
gamma[d][z] = gamma[d][z] + docs[d].itemCountList[w] * (phi[w, z] - oldphi[z])
digamma_gamma[z] = psi(gamma[d][z])
# initialization of the model parameter varphi; the update of alpha is omitted
initializeLdaModel()
print("Checkpoint") #Track Preprocessing Progress
# variational EM Algorithm
for iteration in tqdm(range(0, iterEM)):
nz = np.zeros([T])
nzw = np.zeros([T, M])
alphaSS = 0
# EStep
for d in tqdm(range(0, N)):
variationalInference(docs, d, gamma, phi)
gammaSum = 0
for z in range(0, T):
gammaSum += gamma[d, z]
alphaSS += psi(gamma[d, z])
alphaSS -= T * psi(gammaSum)
for w in range(0, len(docs[d].itemIdList)):
for z in range(0, T):
nzw[z][docs[d].itemIdList[w]] += docs[d].itemCountList[w] * phi[w, z]
nz[z] += docs[d].itemCountList[w] * phi[w, z]
# MStep
updateVarphi()
# calculate the top 10 terms of each topic
topicwords = []
maxTopicWordsNum = 10
for z in range(0, T):
ids = varphi[z, :].argsort()
topicword = []
for j in ids:
topicword.insert(0, id2word[j])
topicwords.append([topicword[0 : min(maxTopicWordsNum, len(topicword))], j])
counter = 1
for item in topicwords:
print(f"Topic {counter}: {item[0]}")
counter+=1
#print(phi)
print('Complete.')
#Write results to file.
with open("results.txt","w+") as file:
for index, item in enumerate(topicwords):
file.write(f"Topic {index+1}: {item[0]} \n")
for item in topicwords:
file.write('\n'+' '.join(item[0])+'\n')
query = ' '.join(item[0])
file.write(retrieve_articles(query))
time.sleep(5)
--- FILE SEPARATOR ---
from newsapi import NewsApiClient
# Init
def retrieve_articles_newsapi():
newsapi = NewsApiClient(api_key='2050df7a6a014501a04c5f42fa6eef54')
# /v2/top-headlines
top_headlines = newsapi.get_top_headlines(q='sector OR big OR corporate OR product OR investor OR pointed OR gavekal OR sovereign OR vincent OR louis',
sources='bbc-news,the-verge',
language='en')
# /v2/everything
all_articles = newsapi.get_everything(q='reality OR long OR central OR capital OR political OR dollars OR trading OR algorithmic OR banks OR released',
sources='bbc-news, the-verge, the-wall-street-journal, the-washington-post, the-hill',
domains='bbc.co.uk, techcrunch.com, ft.com, economist.com, wsj.com, thewashingtonpost.com',
from_param='2019-07-18',
to='2019-08-12',
language='en',
sort_by='relevancy')
# /v2/sources
sources = newsapi.get_sources()
for article in all_articles['articles']:
print(article)
print('\n')
retrieve_articles_newsapi()
--- FILE SEPARATOR ---
from tqdm import tqdm
from split_into_sentences import split_into_sentences
import numpy as np
import codecs, jieba, re, random, math
from scipy.special import psi
# wordCount : the number of total words (not terms)
# itemIdList : the list of distinct terms in the document
# itemCountList : the list of number of the existence of corresponding terms
class Document:
def __init__(self, itemIdList, itemCountList, wordCount):
self.itemIdList = itemIdList
self.itemCountList = itemCountList
self.wordCount = wordCount
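# e.g. a document whose surviving terms are "data data science" would map (with
# hypothetical term ids) to Document(itemIdList=[0, 1], itemCountList=[2, 1], wordCount=3).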
# Preprocessing - filter out stopwords, handle segmentation, and use the class Document to represent all documents in the text sample.
def preprocessing():
# read in all stopwords to be filtered out.
file = codecs.open('stopwords.dic','r','utf-8')
stopwords = [line.strip() for line in file]
#print(stopwords)
file.close()
# the document to read and produce topics from
with open('sample.txt','r') as fh:
all_lines = fh.readlines()
str_all_lines = ' '.join(all_lines).replace('\n','')
raw_documents = split_into_sentences(str_all_lines)
# Check that sentence splitting has worked.
# print(raw_documents)
# Group 4 sentences as a document.
documents = []
i=0
while i < len(raw_documents)-4:
documents.append(raw_documents[i]+'\n'+raw_documents[i+1]+'\n'+raw_documents[i+2]+'\n'+raw_documents[i+3]+'\n')
i+=4
docs = []
word2id = {}
id2word = {}
currentWordId = 0
for document in documents:
#word2Count is a dictionary, essentially a hashmap with the number of occurrences of each word in a sentence.
word2Count = {}
# Create generator objects for each word in the string, cuts on whole words and punctuation.
segList = jieba.cut(document)
for word in segList:
word = word.lower().strip()
# Get rid of items that are punctuation, numbers, or stopwords.
if len(word) > 1 and not re.search('[0-9]', word) and word not in stopwords:
if word not in word2id:
word2id[word] = currentWordId
id2word[currentWordId] = word
currentWordId += 1
if word in word2Count:
word2Count[word] += 1
else:
word2Count[word] = 1
itemIdList = []
itemCountList = []
wordCount = 0
for word in word2Count.keys():
itemIdList.append(word2id[word])
itemCountList.append(word2Count[word])
wordCount += word2Count[word]
docs.append(Document(itemIdList, itemCountList, wordCount))
return docs, word2id, id2word
def maxItemNum(N, docs):
num = 0
for d in range(0, N):
if len(docs[d].itemIdList) > num:
num = len(docs[d].itemIdList)
return num
--- FILE SEPARATOR ---
# Dependencies
import requests
import time
from pprint import pprint
def retrieve_articles(query):
url = "https://api.nytimes.com/svc/search/v2/articlesearch.json?"
# Store a search term
#query = "groups may white reform immigration federation american trump including nation"
#fq = "money"
# Search for articles published between a begin and end date
begin_date = "20190101"
end_date = "20190818"
#filter
query_url = f"{url}api-key=db1Vnm2AtlDDvNGJwu5izccRSafP0DGl&q={query}&begin_date={begin_date}&end_date={end_date}"
# Empty list for articles
articles_list = []
ignore_terms =["marriage","wedding","pregnancy",'adventure']
# loop through pages for more results.
for page in range(0, 4):
query_url = f"{url}api-key=db1Vnm2AtlDDvNGJwu5izccRSafP0DGl&q={query}&begin_date={begin_date}&end_date={end_date}"
# create query with page number
query_url = f"{query_url}&page={str(page)}"
articles = requests.get(query_url).json()
# Add a one second interval between queries to stay within API query limits
time.sleep(1)
# loop through the response and append each article to the list
for article in articles["response"]["docs"]:
x = f'{article["snippet"]} {article["web_url"]}'
articles_list.append(x)
#get rid of terms in articles irrelevant to what you are searching.
for element in ignore_terms:
if element in x:
articles_list.pop()
string_articles_list = ''
for x,y in enumerate(articles_list):
print(f'{x+1}. {y} \n')
string_articles_list += f'{x+1}. {y} \n'
return string_articles_list
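# A minimal usage sketch: retrieve_articles("immigration reform") returns a numbered,
# newline-separated string of snippet + URL pairs drawn from up to 4 result pages,
# skipping any result that contains one of the ignore_terms.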
'''
# Retrieve articles
articles = requests.get(query_url).json()
articles_list = [article for article in articles["response"]["docs"]]
#print(articles_list)
for article in articles_list:
print(f'{article["snippet"]} {article["web_url"]} \n')
'''
|
[
"/keyword_extractor.py",
"/news_api.py",
"/preprocessing.py",
"/retrieve_articles.py"
] |