0.1.51
This commit is contained in:
parent
f73c48d199
commit
7483f4484b
@ -1,7 +1,7 @@
|
|||||||
[metadata]
|
[metadata]
|
||||||
# replace with your username:
|
# replace with your username:
|
||||||
name = guan
|
name = guan
|
||||||
version = 0.1.50
|
version = 0.1.51
|
||||||
author = guanjihuan
|
author = guanjihuan
|
||||||
author_email = guanjihuan@163.com
|
author_email = guanjihuan@163.com
|
||||||
description = An open source python package
|
description = An open source python package
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
Metadata-Version: 2.1
|
Metadata-Version: 2.1
|
||||||
Name: guan
|
Name: guan
|
||||||
Version: 0.1.50
|
Version: 0.1.51
|
||||||
Summary: An open source python package
|
Summary: An open source python package
|
||||||
Home-page: https://py.guanjihuan.com
|
Home-page: https://py.guanjihuan.com
|
||||||
Author: guanjihuan
|
Author: guanjihuan
|
||||||
|
@ -10,6 +10,7 @@ src/guan/band_structures_and_wave_functions.py
|
|||||||
src/guan/basic_functions.py
|
src/guan/basic_functions.py
|
||||||
src/guan/data_processing.py
|
src/guan/data_processing.py
|
||||||
src/guan/density_of_states.py
|
src/guan/density_of_states.py
|
||||||
|
src/guan/machine_learning.py
|
||||||
src/guan/others.py
|
src/guan/others.py
|
||||||
src/guan/quantum_transport.py
|
src/guan/quantum_transport.py
|
||||||
src/guan/topological_invariant.py
|
src/guan/topological_invariant.py
|
||||||
|
@ -17,5 +17,6 @@ from .Green_functions import *
|
|||||||
from .density_of_states import *
|
from .density_of_states import *
|
||||||
from .quantum_transport import *
|
from .quantum_transport import *
|
||||||
from .topological_invariant import *
|
from .topological_invariant import *
|
||||||
|
from .machine_learning import *
|
||||||
from .data_processing import *
|
from .data_processing import *
|
||||||
from .others import *
|
from .others import *
|
@ -4,7 +4,10 @@ import guan
|
|||||||
# 测试
|
# 测试
|
||||||
@guan.function_decorator
|
@guan.function_decorator
|
||||||
def test():
|
def test():
|
||||||
print('\nSuccess in the installation of Guan package!\n')
|
import guan
|
||||||
|
current_version = guan.get_current_version('guan')
|
||||||
|
print(f'\nSuccess in the installation of Guan package! The installed version is guan-{current_version}.\n')
|
||||||
|
guan.notification_of_upgrade(timeout=5)
|
||||||
|
|
||||||
# 泡利矩阵
|
# 泡利矩阵
|
||||||
@guan.function_decorator
|
@guan.function_decorator
|
||||||
|
115
PyPI/src/guan/machine_learning.py
Normal file
115
PyPI/src/guan/machine_learning.py
Normal file
@ -0,0 +1,115 @@
|
|||||||
|
# Module: machine_learning
|
||||||
|
import guan
|
||||||
|
|
||||||
|
# Fully connected neural network model (one hidden layer)
@guan.function_decorator
def fully_connected_neural_network_with_one_hidden_layer(input_size=1, hidden_size=10, output_size=1, activation='relu'):
    """Build a fully connected network with a single hidden layer.

    Parameters:
        input_size: number of input features.
        hidden_size: width of the hidden layer.
        output_size: number of output features.
        activation: one of 'relu', 'leaky_relu', 'sigmoid', 'tanh'; any
            other value means no activation (identity), matching the
            original else branch.

    Returns:
        A freshly constructed torch.nn.Module instance.
    """
    import torch
    # Resolve the activation once at construction time instead of
    # re-branching on the string in every forward pass.
    # torch.sigmoid / torch.tanh replace the deprecated
    # torch.nn.functional.sigmoid / torch.nn.functional.tanh.
    _activations = {
        'relu': torch.nn.functional.relu,
        'leaky_relu': torch.nn.functional.leaky_relu,
        'sigmoid': torch.sigmoid,
        'tanh': torch.tanh,
    }
    act = _activations.get(activation, lambda x: x)
    class model_class(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden_layer = torch.nn.Linear(input_size, hidden_size)
            self.output_layer = torch.nn.Linear(hidden_size, output_size)
        def forward(self, x):
            hidden_output = act(self.hidden_layer(x))
            return self.output_layer(hidden_output)
    model = model_class()
    return model
|
||||||
|
|
||||||
|
# Fully connected neural network model (two hidden layers)
@guan.function_decorator
def fully_connected_neural_network_with_two_hidden_layers(input_size=1, hidden_size_1=10, hidden_size_2=10, output_size=1, activation_1='relu', activation_2='relu'):
    """Build a fully connected network with two hidden layers.

    Parameters:
        input_size: number of input features.
        hidden_size_1, hidden_size_2: widths of the hidden layers.
        output_size: number of output features.
        activation_1, activation_2: per-layer activation names; one of
            'relu', 'leaky_relu', 'sigmoid', 'tanh'. Any other value means
            no activation (identity), matching the original else branch.

    Returns:
        A freshly constructed torch.nn.Module instance.
    """
    import torch
    def _resolve_activation(name):
        # torch.sigmoid / torch.tanh replace the deprecated
        # torch.nn.functional.sigmoid / torch.nn.functional.tanh.
        table = {
            'relu': torch.nn.functional.relu,
            'leaky_relu': torch.nn.functional.leaky_relu,
            'sigmoid': torch.sigmoid,
            'tanh': torch.tanh,
        }
        return table.get(name, lambda x: x)
    # Resolve both activations once, instead of re-branching per forward.
    act_1 = _resolve_activation(activation_1)
    act_2 = _resolve_activation(activation_2)
    class model_class(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden_layer_1 = torch.nn.Linear(input_size, hidden_size_1)
            self.hidden_layer_2 = torch.nn.Linear(hidden_size_1, hidden_size_2)
            self.output_layer = torch.nn.Linear(hidden_size_2, output_size)
        def forward(self, x):
            hidden_output_1 = act_1(self.hidden_layer_1(x))
            hidden_output_2 = act_2(self.hidden_layer_2(hidden_output_1))
            return self.output_layer(hidden_output_2)
    model = model_class()
    return model
|
||||||
|
|
||||||
|
# Fully connected neural network model (three hidden layers)
@guan.function_decorator
def fully_connected_neural_network_with_three_hidden_layers(input_size=1, hidden_size_1=10, hidden_size_2=10, hidden_size_3=10, output_size=1, activation_1='relu', activation_2='relu', activation_3='relu'):
    """Build a fully connected network with three hidden layers.

    Parameters:
        input_size: number of input features.
        hidden_size_1, hidden_size_2, hidden_size_3: widths of the hidden layers.
        output_size: number of output features.
        activation_1, activation_2, activation_3: per-layer activation
            names; one of 'relu', 'leaky_relu', 'sigmoid', 'tanh'. Any
            other value means no activation (identity), matching the
            original else branch.

    Returns:
        A freshly constructed torch.nn.Module instance.
    """
    import torch
    def _resolve_activation(name):
        # torch.sigmoid / torch.tanh replace the deprecated
        # torch.nn.functional.sigmoid / torch.nn.functional.tanh.
        table = {
            'relu': torch.nn.functional.relu,
            'leaky_relu': torch.nn.functional.leaky_relu,
            'sigmoid': torch.sigmoid,
            'tanh': torch.tanh,
        }
        return table.get(name, lambda x: x)
    # Resolve all activations once, instead of re-branching per forward.
    act_1 = _resolve_activation(activation_1)
    act_2 = _resolve_activation(activation_2)
    act_3 = _resolve_activation(activation_3)
    class model_class(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.hidden_layer_1 = torch.nn.Linear(input_size, hidden_size_1)
            self.hidden_layer_2 = torch.nn.Linear(hidden_size_1, hidden_size_2)
            self.hidden_layer_3 = torch.nn.Linear(hidden_size_2, hidden_size_3)
            self.output_layer = torch.nn.Linear(hidden_size_3, output_size)
        def forward(self, x):
            hidden_output_1 = act_1(self.hidden_layer_1(x))
            hidden_output_2 = act_2(self.hidden_layer_2(hidden_output_1))
            hidden_output_3 = act_3(self.hidden_layer_3(hidden_output_2))
            return self.output_layer(hidden_output_3)
    model = model_class()
    return model
|
@ -1065,16 +1065,16 @@ def get_current_version(package_name='guan'):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
# Upgrade notification for the Guan package
def notification_of_upgrade(timeout=2):
    """Print an upgrade hint when a newer guan release exists on PyPI.

    Best-effort: any failure (network error, parsing problem) is silently
    ignored so importing the package never crashes because of this check.

    Parameters:
        timeout: seconds to wait for the PyPI version query.
    """
    try:
        latest_version = get_latest_version(package_name='guan', timeout=timeout)
        current_version = get_current_version('guan')
        # `is not None` instead of `!= None`; narrow `except Exception`
        # instead of a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        if latest_version is not None and current_version is not None:
            if latest_version != current_version:
                print('提示:您当前使用的版本是 guan-'+current_version+',目前已经有最新版本 guan-'+latest_version+'。您可以通过以下命令对软件包进行升级:pip install --upgrade guan')
    except Exception:
        pass

# Run the check on import with 1-in-10 probability, so most imports stay
# fast and quiet.
import random
rand_number = random.randint(1, 10)
if rand_number == 5:
    notification_of_upgrade(timeout=2)
|
Loading…
x
Reference in New Issue
Block a user