From 111592ec2043369726ae76ebe8f95da3a88bd68c Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Sat, 27 Sep 2025 18:16:48 +0800 Subject: [PATCH 01/67] Finish channel creation Signed-off-by: dodo920306 --- src/api-engine/api_engine/urls.py | 10 +- src/api-engine/channel/serializers.py | 15 +- src/api-engine/channel/service.py | 392 ++++++++++++++++++++++++++ src/api-engine/channel/views.py | 39 ++- src/api-engine/node/service.py | 46 +-- src/api-engine/node/views.py | 3 +- src/api-engine/organization/views.py | 2 +- 7 files changed, 459 insertions(+), 48 deletions(-) create mode 100644 src/api-engine/channel/service.py diff --git a/src/api-engine/api_engine/urls.py b/src/api-engine/api_engine/urls.py index 7fdebc470..8c149e25c 100644 --- a/src/api-engine/api_engine/urls.py +++ b/src/api-engine/api_engine/urls.py @@ -26,18 +26,14 @@ TokenRefreshView, ) from django.conf.urls.static import static - +from api_engine.settings import DEBUG, WEBROOT from auth.views import RegisterViewSet, CelloTokenObtainPairView, CelloTokenVerifyView +from channel.views import ChannelViewSet from node.views import NodeViewSet from organization.views import OrganizationViewSet from user.views import UserViewSet -from api.routes.channel.views import ChannelViewSet from api.routes.chaincode.views import ChainCodeViewSet - -DEBUG = settings.DEBUG -WEBROOT = settings.WEBROOT - swagger_info = openapi.Info( title="Cello API Engine Service", default_version="1.0", @@ -54,9 +50,9 @@ # define and register routers of api router = DefaultRouter(trailing_slash=False) -router.register("nodes", NodeViewSet, basename="node") router.register("organizations", OrganizationViewSet, basename="organization") router.register("users", UserViewSet, basename="user") +router.register("node", NodeViewSet, basename="node") router.register("register", RegisterViewSet, basename="register") router.register("channels", ChannelViewSet, basename="channel") router.register("chaincodes", ChainCodeViewSet, basename="chaincode") 
diff --git a/src/api-engine/channel/serializers.py b/src/api-engine/channel/serializers.py index a821aa140..fd188ee5d 100644 --- a/src/api-engine/channel/serializers.py +++ b/src/api-engine/channel/serializers.py @@ -1,8 +1,10 @@ +from typing import Dict, Any + from rest_framework import serializers from channel.models import Channel +from channel.service import create from common.serializers import ListResponseSerializer -from node.enums import NodeStatus from node.models import Node @@ -15,7 +17,7 @@ class ChannelResponse( ): class Meta: model = Channel - fields = ("id", "name", "organizations", "create_ts") + fields = ("id", "name", "organizations", "created_at") class ChannelList(ListResponseSerializer): @@ -41,7 +43,14 @@ def validate(self, data): raise serializers.ValidationError("Invalid orderers") for node in Node.objects.filter(id__in=(peer_ids + orderer_ids)): - if node.status != NodeStatus.Running: + if node.status != Node.Status.RUNNING: raise serializers.ValidationError("Node {} is not running".format(node.name)) return data + + def create(self, validated_data:Dict[str, Any]) -> ChannelID: + return ChannelID(create( + self.context["organization"], + validated_data["name"], + validated_data["peer_ids"], + validated_data["orderer_ids"])) diff --git a/src/api-engine/channel/service.py b/src/api-engine/channel/service.py new file mode 100644 index 000000000..9a2a5b49f --- /dev/null +++ b/src/api-engine/channel/service.py @@ -0,0 +1,392 @@ +import json +import logging +import os +import subprocess +import time +from copy import deepcopy +from typing import List + +import yaml + +from api.exceptions import NoResource +from api_engine.settings import CELLO_HOME, FABRIC_TOOL +from channel.models import Channel +from node.models import Node +from node.service import get_org_directory, get_domain_name, get_orderer_directory, get_peer_directory +from organization.models import Organization + +LOG = logging.getLogger(__name__) + +def create( + 
channel_organization: Organization, + channel_name: str, + channel_peer_ids: List[str], + channel_orderer_ids: List[str]) -> Channel: + channel_peers = list(Node.objects.filter(id__in=channel_peer_ids)) + channel_orderers = list(Node.objects.filter(id__in=channel_orderer_ids)) + validate_nodes(channel_peers + channel_orderers) + + orderer_msp = "OrdererMSP" + orderer_domain_names = [get_domain_name( + channel_organization.name, + Node.Type.ORDERER, + orderer.name) for orderer in channel_orderers] + orderer_addresses = ["{}:7050".format(orderer_domain_name) for orderer_domain_name in orderer_domain_names] + consenters = [{ + "Host": orderer_domain_name, + "Port": 7050, + "ClientTLSCert": "{}/tls/server.crt".format(get_orderer_directory( + channel_organization.name, + orderer_domain_name)), + "ServerTLSCert": "{}/tls/server.crt".format(get_orderer_directory( + channel_organization.name, + orderer_domain_name)), + } for orderer_domain_name in orderer_domain_names] + orderer_organization = { + "Name": "Orderer", + "ID": orderer_msp, + "MSPDir": "{}/msp".format(get_org_directory(channel_organization.name, Node.Type.ORDERER)), + "Policies": { + "Readers": { + "Type": "Signature", + "Rule": "OR('{}.member')".format(orderer_msp), + }, + "Writers": { + "Type": "Signature", + "Rule": "OR('{}.member')".format(orderer_msp), + }, + "Admins": { + "Type": "Signature", + "Rule": "OR('{}.admin')".format(orderer_msp), + }, + }, + "OrdererEndpoints": orderer_addresses, + } + + peer_organization_name = channel_organization.name.split(".", 1)[0].capitalize() + peer_msp = "{}MSP".format(peer_organization_name) + peer_organization = { + "Name": peer_organization_name, + "ID": peer_msp, + "MSPDir": "{}/msp".format(get_org_directory(channel_organization.name, Node.Type.PEER)), + "Policies": { + "Readers": { + "Type": "Signature", + "Rule": "OR('{}.admin', '{}.peer', '{}.client')".format(peer_msp, peer_msp, peer_msp), + }, + "Writers": { + "Type": "Signature", + "Rule": "OR('{}.admin', 
'{}.client')".format(peer_msp, peer_msp), + }, + "Admins": { + "Type": "Signature", + "Rule": "OR('{}.admin')".format(peer_msp), + }, + "Endorsement": { + "Type": "Signature", + "Rule": "OR('{}.peer')".format(peer_msp), + } + } + } + + with open(os.path.join(CELLO_HOME, "config", "configtx.yaml"), "r", encoding="utf-8") as f: + template = yaml.load(f, Loader=yaml.FullLoader) + + application = deepcopy(template["Application"]) + application["Capabilities"] = template["Capabilities"]["Application"] + + orderer = deepcopy(template["Orderer"]) + orderer["Addresses"] = orderer_addresses + orderer["Capabilities"] = template["Capabilities"]["Orderer"] + orderer["OrdererType"] = "etcdraft" + orderer["EtcdRaft"]["Consenters"] = consenters + + channel = deepcopy(template["Channel"]) + channel["Capabilities"] = template["Capabilities"]["Channel"] + + profiles = {channel_name: deepcopy(channel)} + profiles[channel_name]["Orderer"] = deepcopy(orderer) + profiles[channel_name]["Orderer"]["Capabilities"] = template["Capabilities"]["Orderer"] + profiles[channel_name]["Orderer"]["Organizations"] = orderer_organization + profiles[channel_name]["Application"] = deepcopy(application) + profiles[channel_name]["Application"]["Capabilities"] = template["Capabilities"]["Application"] + profiles[channel_name]["Application"]["Organizations"] = peer_organization + + channel_dir = os.path.join(CELLO_HOME, channel_name) + os.makedirs(channel_dir, exist_ok=True) + with open(os.path.join(channel_dir, "configtx.yaml"), "w", encoding="utf-8") as f: + yaml.dump( + { + "Organizations": [orderer_organization, peer_organization], + "Capabilities": { + "Channel": template["Capabilities"]["Channel"], + "Orderer": template["Capabilities"]["Orderer"], + "Application": template["Capabilities"]["Application"], + }, + "Application": application, + "Orderer": orderer, + "Channel": channel, + "Profiles": profiles, + }, + f, + sort_keys=False) + + command = [ + os.path.join(FABRIC_TOOL, "configtxgen"), + 
"-configPath", + channel_dir, + "-profile", + channel_name, + "-outputBlock", + os.path.join(channel_dir, "genesis.block"), + "-channelID", + channel_name, + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + orderer_domain_name = orderer_domain_names[0] + orderer_dir = get_orderer_directory(channel_organization.name, orderer_domain_name) + command = [ + os.path.join(FABRIC_TOOL, "osnadmin"), + "channel", + "join", + "--channelID", + channel_name, + "--config-block", + os.path.join(channel_dir, "genesis.block"), + "-o", + "{}:7053".format(orderer_domain_name), + "--ca-file", + "{}/msp/tlscacerts/tlsca.{}-cert.pem".format( + orderer_dir, + channel_organization.name.split(".", 1)[1], + ), + "--client-cert", + "{}/tls/server.crt".format(orderer_dir), + "--client-key", + "{}/tls/server.key".format(orderer_dir), + ] + LOG.info(" ".join(command)) + subprocess.run( + command, + check=True) + + peer_domain_names = [ + get_domain_name(channel_organization.name, Node.Type.PEER, peer.name) for peer in channel_peers + ] + for peer_domain_name in peer_domain_names: + command = [ + os.path.join(FABRIC_TOOL, "peer"), + "channel", + "join", + "-b", + os.path.join(channel_dir, "genesis.block"), + ] + LOG.info(" ".join(command)) + peer_dir = get_peer_directory(channel_organization.name, peer_domain_name) + env = { + "CORE_PEER_TLS_ENABLED": "true", + "CORE_PEER_LOCALMSPID": peer_msp, + "CORE_PEER_TLS_ROOTCERT_FILE": "{}/tls/ca.crt".format(peer_dir), + "CORE_PEER_MSPCONFIGPATH": "{}/users/Admin@{}/msp".format( + get_org_directory(channel_organization.name, Node.Type.PEER), + channel_organization.name + ), + "CORE_PEER_ADDRESS": "{}:7051".format(peer_domain_name), + "FABRIC_CFG_PATH": peer_dir, + } + subprocess.run( + command, + env=env, + check=True) + + command = [ + os.path.join(FABRIC_TOOL, "peer"), + "channel", + "fetch", + "config", + os.path.join(channel_dir, "config_block.pb"), + "-o", + orderer_addresses[0], + "--ordererTLSHostnameOverride", + 
orderer_domain_name, + "-c", + channel_name, + "--tls", + "--cafile", + "{}/msp/tlscacerts/tlsca.{}-cert.pem".format( + orderer_dir, + channel_organization.name.split(".", 1)[1], + ) + ] + LOG.info(" ".join(command)) + anchor_peer_domain_name = peer_domain_names[0] + anchor_peer_dir = get_peer_directory(channel_organization.name, anchor_peer_domain_name) + time.sleep(5) + subprocess.run( + command, + env={ + "CORE_PEER_TLS_ENABLED": "true", + "CORE_PEER_LOCALMSPID": peer_msp, + "CORE_PEER_TLS_ROOTCERT_FILE": "{}/tls/ca.crt".format(anchor_peer_dir), + "CORE_PEER_MSPCONFIGPATH": "{}/users/Admin@{}/msp".format( + get_org_directory(channel_organization.name, Node.Type.PEER), + channel_organization.name + ), + "CORE_PEER_ADDRESS": "{}:7051".format( + anchor_peer_domain_name + ), + "FABRIC_CFG_PATH": anchor_peer_dir, + }, + check=True) + + command = [ + os.path.join(FABRIC_TOOL, "configtxlator"), + "proto_decode", + "--input={}".format(os.path.join(channel_dir, "config_block.pb")), + "--type=common.Block", + "--output={}".format(os.path.join(channel_dir, "config_block.json")), + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + with open(os.path.join(channel_dir, "config_block.json"), "r", encoding="utf-8") as f: + config_block = json.load(f) + + with open(os.path.join(channel_dir, "config.json"), "w", encoding="utf-8") as f: + json.dump(config_block["data"]["data"][0]["payload"]["data"]["config"], f, sort_keys=False, indent=4) + + with open(os.path.join(channel_dir, "config.json"), "r", encoding="utf-8") as f: + config = json.load(f) + + config["channel_group"]["groups"]["Application"]["groups"][peer_organization_name]["values"].update({ + "AnchorPeers": { + "mod_policy": "Admins", + "value": { + "anchor_peers": [ + {"host": "{}:7051".format(anchor_peer_domain_name)} + ] + }, + "version": 0, + } + }) + + with open(os.path.join(channel_dir, "modified_config.json"), "w", encoding="utf-8") as f: + json.dump(config, f, sort_keys=False, indent=4) + + 
command = [ + os.path.join(FABRIC_TOOL, "configtxlator"), + "proto_encode", + "--input={}".format(os.path.join(channel_dir, "config.json")), + "--type=common.Config", + "--output={}".format(os.path.join(channel_dir, "config.pb")), + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + command = [ + os.path.join(FABRIC_TOOL, "configtxlator"), + "proto_encode", + "--input={}".format(os.path.join(channel_dir, "modified_config.json")), + "--type=common.Config", + "--output={}".format(os.path.join(channel_dir, "modified_config.pb")), + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + command = [ + os.path.join(FABRIC_TOOL, "configtxlator"), + "compute_update", + "--original={}".format(os.path.join(channel_dir, "config.pb")), + "--updated={}".format(os.path.join(channel_dir, "modified_config.pb")), + "--channel_id={}".format(channel_name), + "--output={}".format(os.path.join(channel_dir, "config_update.pb")), + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + command = [ + os.path.join(FABRIC_TOOL, "configtxlator"), + "proto_decode", + "--input={}".format(os.path.join(channel_dir, "config_update.pb")), + "--type=common.ConfigUpdate", + "--output={}".format(os.path.join(channel_dir, "config_update.json")), + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + with open(os.path.join(channel_dir, "config_update.json"), "r", encoding="utf-8") as f: + config_update = json.load(f) + + with open(os.path.join(channel_dir, "config_update_in_envelope.json"), "w", encoding="utf-8") as f: + json.dump( + { + "payload": { + "header": { + "channel_header": {"channel_id": channel_name, "type": 2} + }, + "data": {"config_update": config_update}, + } + }, + f, + sort_keys = False, + indent = 4 + ) + + command = [ + os.path.join(FABRIC_TOOL, "configtxlator"), + "proto_encode", + "--input={}".format(os.path.join(channel_dir, "config_update_in_envelope.json")), + "--type=common.Envelope", + 
"--output={}".format(os.path.join(channel_dir, "config_update_in_envelope.pb")), + ] + LOG.info(" ".join(command)) + subprocess.run(command, check=True) + + command = [ + os.path.join(FABRIC_TOOL, "peer"), + "channel", + "update", + "-f", + os.path.join(channel_dir, "config_update_in_envelope.pb"), + "-c", + channel_name, + "-o", + orderer_addresses[0], + "--ordererTLSHostnameOverride", + orderer_domain_name, + "--tls", + "--cafile", + "{}/msp/tlscacerts/tlsca.{}-cert.pem".format( + orderer_dir, + channel_organization.name.split(".", 1)[1], + ) + ] + LOG.info(" ".join(command)) + subprocess.run( + command, + env={ + "CORE_PEER_TLS_ENABLED": "true", + "CORE_PEER_LOCALMSPID": peer_msp, + "CORE_PEER_TLS_ROOTCERT_FILE": "{}/tls/ca.crt".format(anchor_peer_dir), + "CORE_PEER_MSPCONFIGPATH": "{}/users/Admin@{}/msp".format( + get_org_directory(channel_organization.name, Node.Type.PEER), + channel_organization.name + ), + "CORE_PEER_ADDRESS": "{}:7051".format( + anchor_peer_domain_name + ), + "FABRIC_CFG_PATH": anchor_peer_dir, + }, + check=True) + + res = Channel.objects.create(name=channel_name) + res.organizations.add(channel_organization) + res.orderers.add(channel_orderers[0]) + return res + +def validate_nodes(nodes: List[Node]): + for node in nodes: + if node.status != Node.Status.RUNNING: + raise NoResource("Node {} is not running".format(node.name)) diff --git a/src/api-engine/channel/views.py b/src/api-engine/channel/views.py index ba877402c..56a1cdc04 100644 --- a/src/api-engine/channel/views.py +++ b/src/api-engine/channel/views.py @@ -5,8 +5,9 @@ from rest_framework.response import Response from api.common import ok +from api.common.response import make_response_serializer from channel.models import Channel -from channel.serializers import ChannelList, ChannelID +from channel.serializers import ChannelList, ChannelID, ChannelResponse, ChannelCreateBody from common.responses import with_common_response from common.serializers import PageQuerySerializer @@ -19,24 
+20,34 @@ class ChannelViewSet(viewsets.ViewSet): ] @swagger_auto_schema( + operation_summary="List all channels of the current organization", query_serializer=PageQuerySerializer(), responses=with_common_response( - {status.HTTP_200_OK: ChannelList} + {status.HTTP_200_OK: make_response_serializer(ChannelList)} ), ) def list(self, request): serializer = PageQuerySerializer(data=request.GET) - serializer.is_valid(raise_exception=True) - page = serializer.validated_data.get("page") - per_page = serializer.validated_data.get("per_page") - p = Paginator(Channel.objects.filter(organizations=request.user.organization), per_page) - response = ChannelList( - data={ - "total": p.count, - "data": list(p.page(page).object_list), - } - ) - response.is_valid(raise_exception=True) + p = serializer.get_paginator(Channel.objects.filter(organizations__id__contains=request.user.organization.id)) + response = ChannelList({ + "total": p.count, + "data": ChannelResponse(p.page(serializer.data["page"]).object_list, many=True).data, + }) return Response( - ok(response.validated_data), status=status.HTTP_200_OK + status=status.HTTP_200_OK, + data=ok(response.data), ) + + @swagger_auto_schema( + operation_summary="Create a channel of the current organization", + request_body=ChannelCreateBody(), + responses=with_common_response( + {status.HTTP_201_CREATED: make_response_serializer(ChannelID)} + ), + ) + def create(self, request): + serializer = ChannelCreateBody(data=request.data, context={"organization": request.user.organization}) + serializer.is_valid(raise_exception=True) + return Response( + status=status.HTTP_201_CREATED, + data=serializer.save().data) diff --git a/src/api-engine/node/service.py b/src/api-engine/node/service.py index d7f6fb2fa..6c40df912 100644 --- a/src/api-engine/node/service.py +++ b/src/api-engine/node/service.py @@ -17,7 +17,7 @@ def create(organization: Organization, node_type: Node.Type, node_name: str) -> Node: CryptoConfig(organization.name).update({"type": 
node_type, "Specs": [node_name]}) CryptoGen(organization.name).extend() - node_domain_name = _get_domain_name(organization.name, node_type, node_name) + node_domain_name = get_domain_name(organization.name, node_type, node_name) _generate_node_config(organization.name, node_type, node_domain_name) msp = _get_msp(organization.name, node_type, node_domain_name) tls = _get_tls(organization.name, node_type, node_domain_name) @@ -56,7 +56,7 @@ def create(organization: Organization, node_type: Node.Type, node_name: str) -> node.save() return node -def _get_domain_name(organization_name: str, node_type: Node.Type, node_name: str) -> str: +def get_domain_name(organization_name: str, node_type: Node.Type, node_name: str) -> str: return "{}.{}".format( node_name, organization_name @@ -75,7 +75,7 @@ def _generate_peer_config(organization_name: str, peer_domain_name: str) -> None _generate_config( FABRIC_PEER_CFG, os.path.join( - _get_peer_directory(organization_name, peer_domain_name), + get_peer_directory(organization_name, peer_domain_name), "core.yaml"), **{ "peer_tls_enabled": True, @@ -84,7 +84,7 @@ def _generate_peer_config(organization_name: str, peer_domain_name: str) -> None "peer_gossip_bootstrap": "{}:7051".format(peer_domain_name), "peer_gossip_externalEndpoint": "{}:7051".format(peer_domain_name), "peer_id": peer_domain_name, - "peer_localMspId": "{}MSP".format(organization_name.capitalize()), + "peer_localMspId": "{}MSP".format(organization_name.split(".", 1)[0].capitalize()), "peer_mspConfigPath": "/etc/hyperledger/fabric/msp", "peer_tls_cert_file": "/etc/hyperledger/fabric/tls/server.crt", "peer_tls_key_file": "/etc/hyperledger/fabric/tls/server.key", @@ -98,7 +98,7 @@ def _generate_orderer_config(organization_name: str, orderer_domain_name: str) - _generate_config( FABRIC_ORDERER_CFG, os.path.join( - _get_orderer_directory(organization_name, orderer_domain_name), + get_orderer_directory(organization_name, orderer_domain_name), "orderer.yaml"), **{ 
"Admin_TLS_Enabled": True, @@ -166,7 +166,7 @@ def _get_cfg(organization_name: str, node_type: Node.Type, node_domain_name: str return None def _get_peer_cfg(organization_name: str, peer_domain_name: str): - directory_path = _get_peer_directory(organization_name, peer_domain_name) + directory_path = get_peer_directory(organization_name, peer_domain_name) cfg_zip_path = os.path.join(directory_path, "peer_config.zip") _zip_directory( os.path.join(directory_path, "core.yaml"), @@ -176,7 +176,7 @@ def _get_peer_cfg(organization_name: str, peer_domain_name: str): return base64.b64encode(cfg_zip_input_stream.read()) def _get_orderer_cfg(organization_name: str, orderer_domain_name: str): - directory_path = _get_orderer_directory(organization_name, orderer_domain_name) + directory_path = get_orderer_directory(organization_name, orderer_domain_name) cfg_zip_path = os.path.join(directory_path, "orderer_config.zip") _zip_directory( os.path.join(directory_path, "orderer.yaml"), @@ -201,24 +201,28 @@ def _zip_directory(directory_path:str, output_file_path: str) -> None: str(os.path.join(path_inside_zip, sud_directory)) ) -def _get_peer_directory(organization_name: str, peer_domain_name: str): +def get_peer_directory(organization_name: str, peer_domain_name: str): return _get_node_directory(organization_name, Node.Type.PEER, peer_domain_name) -def _get_orderer_directory(organization_name: str, orderer_domain_name: str): +def get_orderer_directory(organization_name: str, orderer_domain_name: str): return _get_node_directory(organization_name, Node.Type.ORDERER, orderer_domain_name) -def _get_node_directory(organization_name: str, node_type: Node.Type, node_domain_name: str): - return ("{}/{}/crypto-config/{}Organizations/{}/{}s/{}" - .format( - CELLO_HOME, - organization_name, - node_type.lower(), - organization_name.split(".", 1)[1] - if node_type == Node.Type.ORDERER - else organization_name, - node_type.lower(), - node_domain_name, - )) +def 
_get_node_directory(organization_name: str, node_type: Node.Type, node_domain_name: str) -> str: + return "{}/{}s/{}".format( + get_org_directory(organization_name, node_type), + node_type.lower(), + node_domain_name, + ) + +def get_org_directory(organization_name: str, node_type: Node.Type) -> str: + return "{}/{}/crypto-config/{}Organizations/{}".format( + CELLO_HOME, + organization_name, + node_type.lower(), + organization_name.split(".", 1)[1] + if node_type == Node.Type.ORDERER + else organization_name, + ) def _get_node_env(node_type: Node.Type, node_domain_name: str, msp, tls, cfg) -> Optional[Dict[str, Any]]: if node_type == Node.Type.PEER: diff --git a/src/api-engine/node/views.py b/src/api-engine/node/views.py index 666659858..d8483700f 100644 --- a/src/api-engine/node/views.py +++ b/src/api-engine/node/views.py @@ -1,4 +1,3 @@ -from django.core.paginator import Paginator from drf_yasg.utils import swagger_auto_schema from rest_framework import viewsets, status from rest_framework.permissions import IsAuthenticated @@ -21,7 +20,7 @@ class NodeViewSet(viewsets.ViewSet): operation_summary="List all nodes of the current organization", query_serializer=PageQuerySerializer(), responses=with_common_response( - with_common_response({status.HTTP_200_OK: make_response_serializer(NodeListSerializer)}) + {status.HTTP_200_OK: make_response_serializer(NodeListSerializer)} ), ) def list(self, request): diff --git a/src/api-engine/organization/views.py b/src/api-engine/organization/views.py index 7fa5a24be..2f2cbf8ed 100644 --- a/src/api-engine/organization/views.py +++ b/src/api-engine/organization/views.py @@ -25,7 +25,7 @@ class OrganizationViewSet(viewsets.ViewSet): operation_summary="Get Organizations", query_serializer=PageQuerySerializer(), responses=with_common_response( - with_common_response({status.HTTP_200_OK: make_response_serializer(OrganizationList)}) + {status.HTTP_200_OK: make_response_serializer(OrganizationList)} ), ) def list(self, request): From 
284fc8afdb3e06a7b39502cd9619b5b2e62c1670 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Sat, 27 Sep 2025 18:17:36 +0800 Subject: [PATCH 02/67] Remove old channel codes Signed-off-by: dodo920306 --- .../api/routes/channel/serializers.py | 73 --- src/api-engine/api/routes/channel/views.py | 554 ------------------ 2 files changed, 627 deletions(-) delete mode 100644 src/api-engine/api/routes/channel/serializers.py delete mode 100644 src/api-engine/api/routes/channel/views.py diff --git a/src/api-engine/api/routes/channel/serializers.py b/src/api-engine/api/routes/channel/serializers.py deleted file mode 100644 index 202c4f440..000000000 --- a/src/api-engine/api/routes/channel/serializers.py +++ /dev/null @@ -1,73 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from rest_framework import serializers - -from api.common.serializers import ListResponseSerializer -from channel.models import Channel - - -class ChannelCreateBody(serializers.Serializer): - name = serializers.CharField(max_length=128, required=True) - peers = serializers.ListField( - child=serializers.UUIDField(help_text="ID of Peer Nodes") - ) - orderers = serializers.ListField( - child=serializers.UUIDField(help_text="ID of Orderer Nodes") - ) - - def validate(self, attrs): - if len(attrs["peers"]) < 1: - raise serializers.ValidationError("Invalid peers") - if len(attrs["orderers"]) < 1: - raise serializers.ValidationError("Invalid orderers") - - return super().validate(attrs) - - -class ChannelIDSerializer(serializers.Serializer): - id = serializers.UUIDField(help_text="Channel ID") - - -ORG_CHOICES = ( - ("Application", "Application"), - ("Orderer", "Orderer"), -) - - -class ChannelUpdateSerializer(serializers.Serializer): - msp_id = serializers.CharField( - max_length=128, help_text="MSP ID of Organization" - ) - data = serializers.FileField(help_text="Channel config file") - org_type = serializers.ChoiceField( - help_text="Organization type", choices=ORG_CHOICES - ) - - -class 
ChannelOrgListSerializer(serializers.Serializer): - id = serializers.UUIDField(help_text="Organization ID") - name = serializers.CharField( - max_length=128, help_text="name of Organization" - ) - - -class ChannelNetworkSerializer(serializers.Serializer): - id = serializers.UUIDField(help_text="Network ID") - name = serializers.CharField(max_length=128, help_text="name of Network") - - -class ChannelResponseSerializer( - ChannelIDSerializer, serializers.ModelSerializer -): - id = serializers.UUIDField(help_text="ID of Channel") - network = ChannelNetworkSerializer() - organizations = ChannelOrgListSerializer(many=True) - - class Meta: - model = Channel - fields = ("id", "name", "network", "organizations", "created_at") - - -class ChannelListResponse(ListResponseSerializer): - data = ChannelResponseSerializer(many=True, help_text="Channel data") diff --git a/src/api-engine/api/routes/channel/views.py b/src/api-engine/api/routes/channel/views.py deleted file mode 100644 index 87c544297..000000000 --- a/src/api-engine/api/routes/channel/views.py +++ /dev/null @@ -1,554 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from copy import deepcopy -import logging -import json - -from rest_framework import viewsets, status -from rest_framework.decorators import action -from rest_framework.response import Response -from rest_framework.parsers import MultiPartParser, FormParser, JSONParser -from rest_framework.permissions import IsAuthenticated - -from drf_yasg.utils import swagger_auto_schema - -from django.core.exceptions import ObjectDoesNotExist - -from api.utils.common import ( - with_common_response, - to_dict, - json_filter, - json_add_anchor_peer, - json_create_envelope, - init_env_vars, -) -from api.lib.configtxgen import ConfigTX, ConfigTxGen -from api.lib.peer.channel import Channel as PeerChannel -from api.lib.configtxlator.configtxlator import ConfigTxLator -from api.exceptions import ResourceNotFound, NoResource - -from api.routes.channel.serializers import 
( - ChannelCreateBody, - ChannelIDSerializer, - ChannelResponseSerializer, - ChannelUpdateSerializer, -) - -from api.common import ok, err -from channel.models import Channel -from node.models import Node -from organization.models import Organization - -LOG = logging.getLogger(__name__) - -CFG_JSON = "cfg.json" -CFG_PB = "cfg.pb" -DELTA_PB = "delta.pb" -DELTA_JSON = "delta.json" -UPDATED_CFG_JSON = "update_cfg.json" -UPDATED_CFG_PB = "update_cfg.pb" -CFG_DELTA_ENV_JSON = "cfg_delta_env.json" -CFG_DELTA_ENV_PB = "cfg_delta_env.pb" - - -class ChannelViewSet(viewsets.ViewSet): - """Class represents Channel related operations.""" - - permission_classes = [ - IsAuthenticated, - ] - parser_classes = [MultiPartParser, FormParser, JSONParser] - - @swagger_auto_schema( - request_body=ChannelCreateBody, - responses=with_common_response( - {status.HTTP_201_CREATED: ChannelIDSerializer} - ), - ) - def create(self, request): - """ - Create Channel - :param request: create parameter - :return: Channel ID - :rtype: uuid - """ - - serializer = ChannelCreateBody(data=request.data) - if serializer.is_valid(raise_exception=True): - name = serializer.validated_data.get("name") - peers = serializer.validated_data.get("peers") - orderers = serializer.validated_data.get("orderers") - - try: - org = request.user.organization - orderer_nodes = Node.objects.filter(id__in=orderers) - peer_nodes = Node.objects.filter(id__in=peers) - - # validate if all nodes are running - validate_nodes(orderer_nodes) - validate_nodes(peer_nodes) - - # assemble transaction config - _orderers, _peers = assemble_transaction_config(org) - - ConfigTX(org.network.name).create( - name, org.network.consensus, _orderers, _peers - ) - ConfigTxGen(org.network.name).genesis( - profile=name, - channelid=name, - outputblock="{}.block".format(name), - ) - - # osnadmin channel join - ordering_node = Node.objects.get(id=orderers[0]) - osn_channel_join(name, ordering_node, org) - - # peer channel join - 
peer_channel_join(name, peers, org) - - # set anchor peer - anchor_peer = Node.objects.get(id=peers[0]) - set_anchor_peer(name, org, anchor_peer, ordering_node) - - # save channel to db - channel = Channel(name=name, network=org.network) - channel.save() - channel.organizations.add(org) - channel.orderers.add(ordering_node) - - # serialize and return channel id - response = ChannelIDSerializer(data=channel.__dict__) - if response.is_valid(raise_exception=True): - return Response( - ok(response.validated_data), - status=status.HTTP_201_CREATED, - ) - except Exception as e: - return Response( - err(e.args), status=status.HTTP_400_BAD_REQUEST - ) - - @swagger_auto_schema( - responses=with_common_response( - {status.HTTP_200_OK: ChannelResponseSerializer} - ), - ) - def retrieve(self, request, pk=None): - """ - Retrieve channel - :param request: retrieve parameter - :param pk: primary key - :return: none - :rtype: rest_framework.status - """ - try: - channel = Channel.objects.get(id=pk) - response = ChannelResponseSerializer(instance=channel) - return Response(ok(response.data), status=status.HTTP_200_OK) - - except ObjectDoesNotExist: - LOG.exception("channel not found") - raise ResourceNotFound - - @swagger_auto_schema( - request_body=ChannelUpdateSerializer, - responses=with_common_response({status.HTTP_202_ACCEPTED: "Accepted"}), - ) - def update(self, request, pk=None): - """ - Update channel - :param request: update parameters - :param pk: primary key - :return: none - :rtype: rest_framework.status - """ - serializer = ChannelUpdateSerializer(data=request.data) - if serializer.is_valid(raise_exception=True): - channel = Channel.objects.get(id=pk) - org = request.user.organization - try: - # Read uploaded file in cache without saving it on disk. 
- file = request.FILES.get("data").read() - json_data = file.decode("utf8").replace("'", '"') - data = json.loads(json_data) - msp_id = serializer.validated_data.get("msp_id") - org_type = serializer.validated_data.get("org_type") - # Validate uploaded config file - try: - config = data["config"]["channel_group"]["groups"][ - org_type - ]["groups"][msp_id] - except KeyError: - LOG.exception("config file not found") - raise ResourceNotFound - - try: - # Read current channel config from local disk - with open( - channel.get_channel_artifacts_path(CFG_JSON), - "r", - encoding="utf-8", - ) as f: - LOG.info("load current config success") - current_config = json.load(f) - except FileNotFoundError: - LOG.exception("current config file not found") - raise ResourceNotFound - - # Create a new org - new_org = Organization.objects.create( - name=org.name, - ) - LOG.info("new org created") - updated_config = deepcopy(current_config) - updated_config["channel_group"]["groups"]["Application"][ - "groups" - ][msp_id] = config - LOG.info("update config success", updated_config) - - # Update and save the config with new org - with open( - channel.get_channel_artifacts_path(UPDATED_CFG_JSON), - "w", - encoding="utf-8", - ) as f: - LOG.info("save updated config success") - json.dump(updated_config, f, sort_keys=False) - - # Encode it into pb. 
- ConfigTxLator().proto_encode( - input=channel.get_channel_artifacts_path(UPDATED_CFG_JSON), - type="common.Config", - output=channel.get_channel_artifacts_path(UPDATED_CFG_PB), - ) - LOG.info("encode config to pb success") - - # Calculate the config delta between pb files - ConfigTxLator().compute_update( - original=channel.get_channel_artifacts_path(CFG_PB), - updated=channel.get_channel_artifacts_path(UPDATED_CFG_PB), - channel_id=channel.name, - output=channel.get_channel_artifacts_path(DELTA_PB), - ) - LOG.info("compute config delta success") - # Decode the config delta pb into json - config_update = ConfigTxLator().proto_decode( - input=channel.get_channel_artifacts_path(DELTA_PB), - type="common.ConfigUpdate", - ) - LOG.info("decode config delta to json success") - # Wrap the config update as envelope - updated_config = { - "payload": { - "header": { - "channel_header": { - "channel_id": channel.name, - "type": 2, - } - }, - "data": {"config_update": to_dict(config_update)}, - } - } - with open( - channel.get_channel_artifacts_path(CFG_JSON), - "w", - encoding="utf-8", - ) as f: - LOG.info("save config to json success") - json.dump(updated_config, f, sort_keys=False) - - # Encode the config update envelope into pb - ConfigTxLator().proto_encode( - input=channel.get_channel_artifacts_path(CFG_JSON), - type="common.Envelope", - output=channel.get_channel_artifacts_path( - CFG_DELTA_ENV_PB - ), - ) - LOG.info("Encode the config update envelope success") - - # Peers to send the update transaction - nodes = Node.objects.filter( - organization=org, - type=FabricNodeType.Peer.name.lower(), - status=NodeStatus.Running.name.lower(), - ) - - for node in nodes: - dir_node = "{}/{}/crypto-config/peerOrganizations".format( - CELLO_HOME, org.name - ) - env = { - "FABRIC_CFG_PATH": "{}/{}/peers/{}/".format( - dir_node, org.name, node.name + "." 
+ org.name - ), - } - cli = PeerChannel(**env) - cli.signconfigtx( - channel.get_channel_artifacts_path(CFG_DELTA_ENV_PB) - ) - LOG.info("Peers to send the update transaction success") - - # Save a new organization to db. - new_org.save() - LOG.info("new_org save success") - return Response(ok(None), status=status.HTTP_202_ACCEPTED) - except ObjectDoesNotExist: - LOG.exception("channel not found") - raise ResourceNotFound - - @swagger_auto_schema( - responses=with_common_response({status.HTTP_200_OK: "Accepted"}), - ) - @action(methods=["get"], detail=True, url_path="configs") - def get_channel_org_config(self, request, pk=None): - try: - org = request.user.organization - channel = Channel.objects.get(id=pk) - peer = Node.objects.filter( - organization=org, - type=FabricNodeType.Peer.name.lower(), - status=NodeStatus.Running.name.lower(), - ).first() - orderer = Node.objects.filter( - organization=org, - type=FabricNodeType.Orderer.name.lower(), - status=NodeStatus.Running.name.lower() - ).first() - - peer_channel_fetch(channel.name, org, peer, orderer) - - # Decode block to JSON - ConfigTxLator().proto_decode( - input=channel.get_channel_artifacts_path("config_block.pb"), - type="common.Block", - output=channel.get_channel_artifacts_path("config_block.json"), - ) - - # Get the config data from the block - json_filter( - input=channel.get_channel_artifacts_path("config_block.json"), - output=channel.get_channel_artifacts_path("config.json"), - expression=".data.data[0].payload.data.config" - ) - - # Prepare return data - with open(channel.get_channel_artifacts_path("config.json"), 'r', encoding='utf-8') as f: - data = { - "config": json.load(f), - "organization": org.name, - # TODO: create a method on Organization or Node to return msp_id - "msp_id": '{}'.format(org.name.split(".")[0].capitalize()) - } - return Response(data=data, status=status.HTTP_200_OK) - except ObjectDoesNotExist: - LOG.exception("channel org not found") - raise ResourceNotFound - - -def 
validate_nodes(nodes): - """ - validate if all nodes are running - :param nodes: list of nodes - :return: none - """ - for node in nodes: - if node.status != NodeStatus.Running.name.lower(): - raise NoResource("Node {} is not running".format(node.name)) - - -def assemble_transaction_config(org): - """ - Assemble transaction config for the channel. - :param org: Organization object. - :return: _orderers, _peers - """ - _orderers = [{"name": org.name, "hosts": []}] - _peers = [{"name": org.name, "hosts": []}] - nodes = Node.objects.filter(organization=org) - for node in nodes: - if node.type == "peer": - _peers[0]["hosts"].append({"name": node.name}) - elif node.type == "orderer": - _orderers[0]["hosts"].append({"name": node.name}) - - return _orderers, _peers - - -def osn_channel_join(name, ordering_node, org): - """ - Join ordering node to the channel. - :param ordering_node: Node object - :param org: Organization object. - :param channel_name: Name of the channel. - :return: none - """ - envs = init_env_vars(ordering_node, org) - peer_channel_cli = PeerChannel(**envs) - peer_channel_cli.create( - channel=name, - orderer_admin_url="{}.{}:{}".format( - ordering_node.name, org.name.split(".", 1)[1], str(7053) - ), - block_path="{}/{}/{}.block".format(CELLO_HOME, org.network.name, name), - ) - - -def peer_channel_join(name, peers, org): - """ - Join peer nodes to the channel. - :param peers: list of Node objects - :param org: Organization object. - :param channel_name: Name of the channel. - :return: none - """ - for i in range(len(peers)): - peer_node = Node.objects.get(id=peers[i]) - envs = init_env_vars(peer_node, org) - peer_channel_cli = PeerChannel(**envs) - peer_channel_cli.join( - block_path="{}/{}/{}.block".format( - CELLO_HOME, org.network.name, name - ) - ) - - -def set_anchor_peer(name, org, anchor_peer, ordering_node): - """ - Set anchor peer for the channel. - :param org: Organization object. 
- :param anchor_peer: Anchor peer node - :param ordering_node: Orderer node - :return: none - """ - org_msp = "{}".format(org.name.split(".", 1)[0].capitalize()) - channel_artifacts_path = "{}/{}".format(CELLO_HOME, org.network.name) - - # Fetch the channel block from the orderer - peer_channel_fetch(name, org, anchor_peer, ordering_node) - - # Decode block to JSON - ConfigTxLator().proto_decode( - input="{}/config_block.pb".format(channel_artifacts_path), - type="common.Block", - output="{}/config_block.json".format(channel_artifacts_path), - ) - - # Get the config data from the block - json_filter( - input="{}/config_block.json".format(channel_artifacts_path), - output="{}/config.json".format(channel_artifacts_path), - expression=".data.data[0].payload.data.config", - ) - - # add anchor peer config - anchor_peer_config = { - "AnchorPeers": { - "mod_policy": "Admins", - "value": { - "anchor_peers": [ - {"host": anchor_peer.name + "." + org.name, "port": 7051} - ] - }, - "version": 0, - } - } - - json_add_anchor_peer( - input="{}/config.json".format(channel_artifacts_path), - output="{}/modified_config.json".format(channel_artifacts_path), - anchor_peer_config=anchor_peer_config, - org_msp=org_msp, - ) - - ConfigTxLator().proto_encode( - input="{}/config.json".format(channel_artifacts_path), - type="common.Config", - output="{}/config.pb".format(channel_artifacts_path), - ) - - ConfigTxLator().proto_encode( - input="{}/modified_config.json".format(channel_artifacts_path), - type="common.Config", - output="{}/modified_config.pb".format(channel_artifacts_path), - ) - - ConfigTxLator().compute_update( - original="{}/config.pb".format(channel_artifacts_path), - updated="{}/modified_config.pb".format(channel_artifacts_path), - channel_id=name, - output="{}/config_update.pb".format(channel_artifacts_path), - ) - - ConfigTxLator().proto_decode( - input="{}/config_update.pb".format(channel_artifacts_path), - type="common.ConfigUpdate", - 
output="{}/config_update.json".format(channel_artifacts_path), - ) - - # Create config update envelope - json_create_envelope( - input="{}/config_update.json".format(channel_artifacts_path), - output="{}/config_update_in_envelope.json".format( - channel_artifacts_path - ), - channel=name, - ) - - ConfigTxLator().proto_encode( - input="{}/config_update_in_envelope.json".format( - channel_artifacts_path - ), - type="common.Envelope", - output="{}/config_update_in_envelope.pb".format( - channel_artifacts_path - ), - ) - - # Update the channel of anchor peer - peer_channel_update( - name, org, anchor_peer, ordering_node, channel_artifacts_path - ) - - -def peer_channel_fetch(name, org, anchor_peer, ordering_node): - """ - Fetch the channel block from the orderer. - :param anchor_peer: Anchor peer node - :param org: Organization object. - :param channel_name: Name of the channel. - :return: none - """ - PeerChannel(**{**init_env_vars(ordering_node, org), **init_env_vars(anchor_peer, org)}).fetch( - block_path="{}/{}/config_block.pb".format(CELLO_HOME, org.network.name), - channel=name, orderer_general_url="{}.{}:{}".format( - ordering_node.name, - org.name.split(".", 1)[1], - str(7050) - ) - ) - - -def peer_channel_update( - name, org, anchor_peer, ordering_node, channel_artifacts_path -): - """ - Update the channel. - :param anchor_peer: Anchor peer node - :param org: Organization object. - :param channel_name: Name of the channel. 
- :return: none - """ - envs = init_env_vars(anchor_peer, org) - peer_channel_cli = PeerChannel(**envs) - peer_channel_cli.update( - channel=name, - channel_tx="{}/config_update_in_envelope.pb".format( - channel_artifacts_path - ), - orderer_url="{}.{}:{}".format( - ordering_node.name, org.name.split(".", 1)[1], str(7050) - ), - ) From ce6ba868afe8339b0d6d97562a68c463e56819bd Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Sat, 27 Sep 2025 19:20:00 +0800 Subject: [PATCH 03/67] Finish list chaincodes Signed-off-by: dodo920306 --- src/api-engine/api_engine/urls.py | 4 +- src/api-engine/chaincode/models.py | 52 +++++++++++++++++++ src/api-engine/chaincode/serializers.py | 25 +++++++++ .../__init__.py => chaincode/service.py} | 0 src/api-engine/chaincode/views.py | 40 ++++++++++++++ src/api-engine/node/models.py | 5 -- src/api-engine/node/serializers.py | 1 - 7 files changed, 119 insertions(+), 8 deletions(-) create mode 100644 src/api-engine/chaincode/serializers.py rename src/api-engine/{api/routes/channel/__init__.py => chaincode/service.py} (100%) diff --git a/src/api-engine/api_engine/urls.py b/src/api-engine/api_engine/urls.py index 8c149e25c..4938165ad 100644 --- a/src/api-engine/api_engine/urls.py +++ b/src/api-engine/api_engine/urls.py @@ -28,11 +28,11 @@ from django.conf.urls.static import static from api_engine.settings import DEBUG, WEBROOT from auth.views import RegisterViewSet, CelloTokenObtainPairView, CelloTokenVerifyView +from chaincode.views import ChaincodeViewSet from channel.views import ChannelViewSet from node.views import NodeViewSet from organization.views import OrganizationViewSet from user.views import UserViewSet -from api.routes.chaincode.views import ChainCodeViewSet swagger_info = openapi.Info( title="Cello API Engine Service", @@ -55,7 +55,7 @@ router.register("node", NodeViewSet, basename="node") router.register("register", RegisterViewSet, basename="register") router.register("channels", ChannelViewSet, basename="channel") 
-router.register("chaincodes", ChainCodeViewSet, basename="chaincode") +router.register("chaincodes", ChaincodeViewSet, basename="chaincode") urlpatterns = [path(WEBROOT, include(router.urls + [ path( diff --git a/src/api-engine/chaincode/models.py b/src/api-engine/chaincode/models.py index 71a836239..92ba2525f 100644 --- a/src/api-engine/chaincode/models.py +++ b/src/api-engine/chaincode/models.py @@ -1,3 +1,55 @@ from django.db import models +from channel.models import Channel +from common.utils import make_uuid +from user.models import UserProfile + + # Create your models here. +class Chaincode(models.Model): + id = models.UUIDField( + primary_key=True, + help_text="Chaincode ID", + default=make_uuid, + editable=False, + unique=True, + ) + package_id = models.CharField( + help_text="Chaincode Package ID", + max_length=128, + editable=False, + unique=True, + ) + label = models.CharField( + help_text="Chaincode Label", + max_length=128, + ) + creator = models.ForeignKey( + UserProfile, + help_text="Chaincode Creator", + on_delete=models.SET_NULL, + null=True, + ) + channel = models.ForeignKey( + Channel, + help_text="Chaincode Channel", + on_delete=models.CASCADE, + related_name="chaincodes", + ) + language = models.CharField( + help_text="Chaincode Language", + max_length=128, + ) + description = models.CharField( + help_text="Chaincode Description", + max_length=128, + blank=True, + null=True, + ) + created_at = models.DateTimeField( + help_text="Chaincode Creation Timestamp", + auto_now_add=True, + ) + + class Meta: + ordering = ("-created_at",) diff --git a/src/api-engine/chaincode/serializers.py b/src/api-engine/chaincode/serializers.py new file mode 100644 index 000000000..8679f343e --- /dev/null +++ b/src/api-engine/chaincode/serializers.py @@ -0,0 +1,25 @@ +from rest_framework import serializers + +from chaincode.models import Chaincode +from common.serializers import ListResponseSerializer + +class ChaincodeID(serializers.Serializer): + id = 
serializers.UUIDField(help_text="ChainCode ID") + +class ChaincodeResponse( + ChaincodeID, serializers.ModelSerializer): + class Meta: + model = Chaincode + fields = ( + "id", + "package_id", + "label", + "creator", + "language", + "created_at", + "description", + ) + +class ChaincodeList(ListResponseSerializer): + data = ChaincodeResponse(many=True, help_text="Chaincode data") + diff --git a/src/api-engine/api/routes/channel/__init__.py b/src/api-engine/chaincode/service.py similarity index 100% rename from src/api-engine/api/routes/channel/__init__.py rename to src/api-engine/chaincode/service.py diff --git a/src/api-engine/chaincode/views.py b/src/api-engine/chaincode/views.py index 91ea44a21..e15845732 100644 --- a/src/api-engine/chaincode/views.py +++ b/src/api-engine/chaincode/views.py @@ -1,3 +1,43 @@ from django.shortcuts import render +from drf_yasg.utils import swagger_auto_schema +from rest_framework import viewsets, status +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response + +from api.common.response import make_response_serializer +from chaincode.models import Chaincode +from chaincode.serializers import ChaincodeList +from channel.serializers import ChannelResponse +from common.responses import with_common_response, ok +from common.serializers import PageQuerySerializer + # Create your views here. 
+class ChaincodeViewSet(viewsets.ViewSet): + permission_classes = [ + IsAuthenticated, + ] + + @swagger_auto_schema( + operation_summary="List all chaincodes of the current organization", + query_serializer=PageQuerySerializer(), + responses=with_common_response( + {status.HTTP_200_OK: make_response_serializer(ChaincodeList)} + ), + ) + def list(self, request): + serializer = PageQuerySerializer(data=request.GET) + p = serializer.get_paginator( + Chaincode.objects.filter(channel__organizations__id__contains=request.user.organization.id), + ) + return Response( + status=status.HTTP_200_OK, + data=ok(ChaincodeList({ + "total": p.count, + "data": ChannelResponse( + p.get_page(serializer.data["page"]).object_list, + many=True + ).data, + }).data), + ) + diff --git a/src/api-engine/node/models.py b/src/api-engine/node/models.py index ba33d930f..b979e14f9 100644 --- a/src/api-engine/node/models.py +++ b/src/api-engine/node/models.py @@ -57,11 +57,6 @@ class Status(models.TextChoices): help_text="Node TLS", null=True, ) - cid = models.CharField( - help_text="Node Container ID", - max_length=256, - default="", - ) class Meta: ordering = ("-created_at",) diff --git a/src/api-engine/node/serializers.py b/src/api-engine/node/serializers.py index d41cdff1c..09de1ad8f 100644 --- a/src/api-engine/node/serializers.py +++ b/src/api-engine/node/serializers.py @@ -19,7 +19,6 @@ class Meta: "name", "created_at", "status", - "cid", ) From 407eb5cc04648c3fd6b7db19bce1b81a2e2dabd8 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Sat, 27 Sep 2025 23:12:06 +0800 Subject: [PATCH 04/67] Finish upload chaincodes Signed-off-by: dodo920306 --- src/api-engine/chaincode/serializers.py | 35 +++++++++++++++--- src/api-engine/chaincode/views.py | 26 ++++++++++++-- src/api-engine/channel/serializers.py | 47 +++++++++++++++++++------ src/api-engine/node/service.py | 5 +++ 4 files changed, 96 insertions(+), 17 deletions(-) diff --git a/src/api-engine/chaincode/serializers.py 
b/src/api-engine/chaincode/serializers.py index 8679f343e..467b06d41 100644 --- a/src/api-engine/chaincode/serializers.py +++ b/src/api-engine/chaincode/serializers.py @@ -1,13 +1,16 @@ +import tarfile + from rest_framework import serializers from chaincode.models import Chaincode from common.serializers import ListResponseSerializer -class ChaincodeID(serializers.Serializer): - id = serializers.UUIDField(help_text="ChainCode ID") +class ChaincodeID(serializers.ModelSerializer): + class Meta: + model = Chaincode + fields = ("id",) -class ChaincodeResponse( - ChaincodeID, serializers.ModelSerializer): +class ChaincodeResponse(ChaincodeID): class Meta: model = Chaincode fields = ( @@ -23,3 +26,27 @@ class Meta: class ChaincodeList(ListResponseSerializer): data = ChaincodeResponse(many=True, help_text="Chaincode data") +class ChaincodeCreateBody(serializers.Serializer): + file = serializers.FileField() + description = serializers.CharField(max_length=128, required=False) + + @staticmethod + def validate_file(value): + if not value.name.endswith(".tar.gz"): + raise serializers.ValidationError("Chaincode Package must be a '.tar.gz' file.") + + if value.content_type != "application/gzip": + raise serializers.ValidationError( + "Chaincode Package is not a 'application/gzip' file but {} instead." 
+ .format(value.content_type) + ) + + try: + value.seek(0) + with tarfile.open(fileobj=value, mode='r:gz') as tar: + tar.getmembers() + value.seek(0) + except tarfile.TarError: + raise serializers.ValidationError("Failed to open the chaincode tar package.") + + return value diff --git a/src/api-engine/chaincode/views.py b/src/api-engine/chaincode/views.py index e15845732..b280e843d 100644 --- a/src/api-engine/chaincode/views.py +++ b/src/api-engine/chaincode/views.py @@ -1,15 +1,18 @@ -from django.shortcuts import render +from drf_yasg import openapi from drf_yasg.utils import swagger_auto_schema from rest_framework import viewsets, status +from rest_framework.decorators import action +from rest_framework.parsers import FileUploadParser, JSONParser, FormParser, MultiPartParser from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from api.common.response import make_response_serializer from chaincode.models import Chaincode -from chaincode.serializers import ChaincodeList +from chaincode.serializers import ChaincodeList, ChaincodeCreateBody, ChaincodeID from channel.serializers import ChannelResponse from common.responses import with_common_response, ok from common.serializers import PageQuerySerializer +from common.utils import make_uuid # Create your views here. 
@@ -18,6 +21,11 @@ class ChaincodeViewSet(viewsets.ViewSet): IsAuthenticated, ] + def get_parsers(self): + if getattr(self, 'action', None) == "create" or getattr(getattr(self, 'request', None), "FILES", None) is not None: + return [MultiPartParser] + return [JSONParser] + @swagger_auto_schema( operation_summary="List all chaincodes of the current organization", query_serializer=PageQuerySerializer(), @@ -41,3 +49,17 @@ def list(self, request): }).data), ) + @swagger_auto_schema( + operation_summary="Create a chaincode of the current organization", + request_body=ChaincodeCreateBody(), + responses=with_common_response( + {status.HTTP_201_CREATED: make_response_serializer(ChaincodeID)} + ), + ) + def create(self, request): + serializer = ChaincodeCreateBody(data=request.data) + serializer.is_valid(raise_exception=True) + return Response( + status=status.HTTP_201_CREATED, + data=ok(ChaincodeID({"id": make_uuid()}).data) + ) diff --git a/src/api-engine/channel/serializers.py b/src/api-engine/channel/serializers.py index fd188ee5d..1824c6892 100644 --- a/src/api-engine/channel/serializers.py +++ b/src/api-engine/channel/serializers.py @@ -6,6 +6,7 @@ from channel.service import create from common.serializers import ListResponseSerializer from node.models import Node +from node.service import get_node class ChannelID(serializers.Serializer): @@ -17,7 +18,12 @@ class ChannelResponse( ): class Meta: model = Channel - fields = ("id", "name", "organizations", "created_at") + fields = ( + "id", + "name", + "organizations", + "created_at" + ) class ChannelList(ListResponseSerializer): @@ -33,20 +39,39 @@ class ChannelCreateBody(serializers.Serializer): child=serializers.UUIDField(help_text="ID of Orderer Nodes") ) - def validate(self, data): - peer_ids = data["peer_ids"] - orderer_ids = data["orderer_ids"] + @staticmethod + def validate_peer_ids(value): + if len(value) < 1: + raise serializers.ValidationError("You must specify at least one peer for a channel.") - if 
len(peer_ids) < 1: - raise serializers.ValidationError("Invalid peers") - if len(orderer_ids) < 1: - raise serializers.ValidationError("Invalid orderers") + for peer_id in value: + node = get_node(peer_id) + if node is None: + raise serializers.ValidationError("Peer {} not found.".format(peer_id)) + if node.type != Node.Type.PEER: + raise serializers.ValidationError( + "Node {} is not a peer but {} instead.".format(peer_id, node.type)) + if node.status != Node.Status.RUNNING: + raise serializers.ValidationError("Peer {} is not running.".format(peer_id)) + + return value + + @staticmethod + def validate_orderer_ids(value): + if len(value) < 1: + raise serializers.ValidationError("You must specify at least one orderer for a channel.") - for node in Node.objects.filter(id__in=(peer_ids + orderer_ids)): + for orderer_id in value: + node = get_node(orderer_id) + if node is None: + raise serializers.ValidationError("Orderer {} not found.".format(orderer_id)) + if node.type != Node.Type.PEER: + raise serializers.ValidationError( + "Node {} is not an orderer but {} instead.".format(orderer_id, node.type)) if node.status != Node.Status.RUNNING: - raise serializers.ValidationError("Node {} is not running".format(node.name)) + raise serializers.ValidationError("Orderer {} is not running.".format(orderer_id)) - return data + return value def create(self, validated_data:Dict[str, Any]) -> ChannelID: return ChannelID(create( diff --git a/src/api-engine/node/service.py b/src/api-engine/node/service.py index 6c40df912..2e42a030c 100644 --- a/src/api-engine/node/service.py +++ b/src/api-engine/node/service.py @@ -13,6 +13,11 @@ from node.models import Node from organization.models import Organization +def get_node(node_id: str) -> Optional[Node]: + try: + return Node.objects.get(id=node_id) + except Node.DoesNotExist: + return None def create(organization: Organization, node_type: Node.Type, node_name: str) -> Node: CryptoConfig(organization.name).update({"type": node_type, 
"Specs": [node_name]}) From be87027fc78c57cf3dd50541262e8aa438964629 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Sun, 28 Sep 2025 11:57:23 +0800 Subject: [PATCH 05/67] Finish chaincode creation Signed-off-by: dodo920306 --- src/api-engine/api_engine/settings.py | 4 +- src/api-engine/auth/serializers.py | 4 +- src/api-engine/auth/views.py | 26 +++++------ src/api-engine/chaincode/models.py | 28 +++++++++++- src/api-engine/chaincode/serializers.py | 49 +++++++++++++++++---- src/api-engine/chaincode/service.py | 36 +++++++++++++++ src/api-engine/chaincode/views.py | 16 ++++--- src/api-engine/channel/models.py | 10 ++--- src/api-engine/channel/serializers.py | 13 +++--- src/api-engine/channel/views.py | 9 ++-- src/api-engine/node/serializers.py | 8 ++-- src/api-engine/node/views.py | 12 ++--- src/api-engine/organization/serializeres.py | 25 ++++++----- src/api-engine/user/serializers.py | 10 ++--- src/api-engine/user/views.py | 24 +++++----- 15 files changed, 183 insertions(+), 91 deletions(-) diff --git a/src/api-engine/api_engine/settings.py b/src/api-engine/api_engine/settings.py index 1854d3969..59162c09c 100644 --- a/src/api-engine/api_engine/settings.py +++ b/src/api-engine/api_engine/settings.py @@ -137,7 +137,7 @@ # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.1/howto/static-files/ WEBROOT = os.path.join(os.getenv("WEB_PREFIX", ""), "api", os.getenv("API_VERSION", "v1")) + "/" -STATIC_URL = "/static/" +STATIC_URL = "static/" STATIC_ROOT = os.path.join(BASE_DIR, STATIC_URL) REST_FRAMEWORK = { @@ -214,7 +214,7 @@ MAX_AGENT_CAPACITY = 100 -MEDIA_URL = "/media/" +MEDIA_URL = "media/" MEDIA_ROOT = os.path.join(BASE_DIR, MEDIA_URL) SIMPLE_JWT = { diff --git a/src/api-engine/auth/serializers.py b/src/api-engine/auth/serializers.py index 90a491472..7de32cd6a 100644 --- a/src/api-engine/auth/serializers.py +++ b/src/api-engine/auth/serializers.py @@ -6,7 +6,7 @@ from api.lib.pki import CryptoConfig, CryptoGen from organization.models 
import Organization from user.models import UserProfile -from user.serializers import UserInfoSerializer +from user.serializers import UserInfo class RegisterBody(serializers.Serializer): @@ -59,7 +59,7 @@ class LoginBody(serializers.Serializer): class LoginSuccessBody(serializers.Serializer): token = serializers.CharField(help_text="access token") - user = UserInfoSerializer() + user = UserInfo() class TokenVerifyRequest(serializers.Serializer): token = serializers.CharField(help_text="access token") diff --git a/src/api-engine/auth/views.py b/src/api-engine/auth/views.py index e3da64ec9..03a75cbaa 100644 --- a/src/api-engine/auth/views.py +++ b/src/api-engine/auth/views.py @@ -15,7 +15,7 @@ from api.utils.common import with_common_response from auth.serializers import RegisterBody, RegisterResponse, LoginBody, LoginSuccessBody, TokenVerifyRequest from user.models import UserProfile -from user.serializers import UserInfoSerializer +from user.serializers import UserInfo LOG = logging.getLogger(__name__) @@ -52,7 +52,7 @@ class CelloTokenObtainPairView(TokenObtainPairView): {status.HTTP_200_OK: make_response_serializer(LoginSuccessBody)} ), ) - def post(self, request, *args, **kwargs): + def post(self, request: Request, *args, **kwargs): serializer = LoginBody(data=request.data) serializer.is_valid(raise_exception=True) user = authenticate( @@ -65,14 +65,11 @@ def post(self, request, *args, **kwargs): status=status.HTTP_401_UNAUTHORIZED, ) - response = LoginSuccessBody( - data={ - "token": str(AccessToken.for_user(user)), - "user": UserInfoSerializer(user).data, - }) - response.is_valid(raise_exception=True) return Response( - data=ok(response.data), + data=ok(LoginSuccessBody({ + "token": str(AccessToken.for_user(user)), + "user": UserInfo(user).data, + }).data), status=status.HTTP_200_OK, ) @@ -99,13 +96,10 @@ def post(self, request, *args, **kwargs): data=err(msg="invalid token"), status=status.HTTP_400_BAD_REQUEST) - response = LoginSuccessBody( - data={ - 
"token": str(access_token.token), - "user": UserInfoSerializer(user).data, - }) - response.is_valid(raise_exception=True) return Response( - data=ok(response.data), + data=ok(LoginSuccessBody({ + "token": str(access_token.token), + "user": UserInfo(user).data, + }).data), status=status.HTTP_200_OK, ) diff --git a/src/api-engine/chaincode/models.py b/src/api-engine/chaincode/models.py index 92ba2525f..2010af276 100644 --- a/src/api-engine/chaincode/models.py +++ b/src/api-engine/chaincode/models.py @@ -1,12 +1,24 @@ +import os.path + from django.db import models from channel.models import Channel from common.utils import make_uuid +from node.models import Node from user.models import UserProfile - +def get_package_path(instance, filename) -> str: + return str(os.path.join(instance.channel.name, filename)) # Create your models here. + + class Chaincode(models.Model): + class Status(models.TextChoices): + CREATED = "CREATED", "Created" + INSTALLED = "INSTALLED", "Installed" + APPROVED = "APPROVED", "Approved" + COMMITTED = "COMMITTED", "Committed" + id = models.UUIDField( primary_key=True, help_text="Chaincode ID", @@ -20,6 +32,10 @@ class Chaincode(models.Model): editable=False, unique=True, ) + package = models.FileField( + help_text="Chaincode Package", + upload_to=get_package_path, + ) label = models.CharField( help_text="Chaincode Label", max_length=128, @@ -40,6 +56,16 @@ class Chaincode(models.Model): help_text="Chaincode Language", max_length=128, ) + status = models.CharField( + help_text="Chaincode Status", + choices=Status.choices, + default=Status.CREATED, + max_length=16, + ) + peers = models.ManyToManyField( + to=Node, + help_text="Chaincode Installed Peers", + ) description = models.CharField( help_text="Chaincode Description", max_length=128, diff --git a/src/api-engine/chaincode/serializers.py b/src/api-engine/chaincode/serializers.py index 467b06d41..f36b4a46b 100644 --- a/src/api-engine/chaincode/serializers.py +++ 
b/src/api-engine/chaincode/serializers.py @@ -3,7 +3,14 @@ from rest_framework import serializers from chaincode.models import Chaincode +from chaincode.service import create_chaincode, get_metadata_label +from channel.models import Channel +from channel.serializers import ChannelID from common.serializers import ListResponseSerializer +from node.models import Node +from node.serializers import NodeID +from user.serializers import UserID + class ChaincodeID(serializers.ModelSerializer): class Meta: @@ -11,6 +18,9 @@ class Meta: fields = ("id",) class ChaincodeResponse(ChaincodeID): + channel = ChannelID() + creator = UserID() + class Meta: model = Chaincode fields = ( @@ -18,7 +28,9 @@ class Meta: "package_id", "label", "creator", + "channel", "language", + "status", "created_at", "description", ) @@ -26,12 +38,25 @@ class Meta: class ChaincodeList(ListResponseSerializer): data = ChaincodeResponse(many=True, help_text="Chaincode data") -class ChaincodeCreateBody(serializers.Serializer): - file = serializers.FileField() - description = serializers.CharField(max_length=128, required=False) +class ChaincodeCreateBody(serializers.ModelSerializer): + peers = serializers.PrimaryKeyRelatedField( + many=True, + queryset=Node.objects.filter(type=Node.Type.PEER), + help_text="Chaincode Peers" + ) + + + class Meta: + model = Chaincode + fields = ( + "package", + "channel", + "peers", + "description", + ) @staticmethod - def validate_file(value): + def validate_package(value): if not value.name.endswith(".tar.gz"): raise serializers.ValidationError("Chaincode Package must be a '.tar.gz' file.") @@ -42,11 +67,19 @@ def validate_file(value): ) try: - value.seek(0) - with tarfile.open(fileobj=value, mode='r:gz') as tar: - tar.getmembers() - value.seek(0) + metadata = get_metadata_label(value) + if metadata is None: + raise serializers.ValidationError("Metadata not found.") except tarfile.TarError: raise serializers.ValidationError("Failed to open the chaincode tar package.") 
return value + + def validate_channel(self, value: Channel): + if not value.organizations.contains(self.context["organization"]): + raise serializers.ValidationError("You can only install chaincodes on your organization.") + return value + + def create(self, validated_data) -> ChaincodeID: + validated_data["user"] = self.context["user"] + return ChaincodeID({"id": create_chaincode(**validated_data).id}) diff --git a/src/api-engine/chaincode/service.py b/src/api-engine/chaincode/service.py index e69de29bb..399086bfd 100644 --- a/src/api-engine/chaincode/service.py +++ b/src/api-engine/chaincode/service.py @@ -0,0 +1,36 @@ +import json +import tarfile +from typing import Optional, List + +from chaincode.models import Chaincode +from channel.models import Channel +from node.models import Node +from user.models import UserProfile + + +def create_chaincode(package, channel: Channel, user: UserProfile, peers: List[Node], description: str) -> Chaincode: + chaincode = Chaincode( + package=package, + channel=channel, + creator=user, + description=description, + ) + chaincode.save() + chaincode.peers.add(*peers) + return chaincode + + +def get_metadata_label(file) -> Optional[str]: + file.seek(0) + res = None + with tarfile.open(fileobj=file, mode='r:gz') as tar: + for member in tar.getmembers(): + if member.name.endswith("metadata.json"): + res = json.loads( + tar.extractfile(member) + .read() + .decode("utf-8") + ).get("label") + break + file.seek(0) + return res diff --git a/src/api-engine/chaincode/views.py b/src/api-engine/chaincode/views.py index b280e843d..f42841d0f 100644 --- a/src/api-engine/chaincode/views.py +++ b/src/api-engine/chaincode/views.py @@ -8,11 +8,9 @@ from api.common.response import make_response_serializer from chaincode.models import Chaincode -from chaincode.serializers import ChaincodeList, ChaincodeCreateBody, ChaincodeID -from channel.serializers import ChannelResponse +from chaincode.serializers import ChaincodeList, ChaincodeCreateBody, 
ChaincodeID, ChaincodeResponse from common.responses import with_common_response, ok from common.serializers import PageQuerySerializer -from common.utils import make_uuid # Create your views here. @@ -42,8 +40,9 @@ def list(self, request): status=status.HTTP_200_OK, data=ok(ChaincodeList({ "total": p.count, - "data": ChannelResponse( - p.get_page(serializer.data["page"]).object_list, + "data": ChaincodeResponse( + p.get_page(serializer.data["page"]) + .object_list, many=True ).data, }).data), @@ -57,9 +56,12 @@ def list(self, request): ), ) def create(self, request): - serializer = ChaincodeCreateBody(data=request.data) + serializer = ChaincodeCreateBody(data=request.data, context={ + "user": request.user, + "organization": request.user.organization, + }) serializer.is_valid(raise_exception=True) return Response( status=status.HTTP_201_CREATED, - data=ok(ChaincodeID({"id": make_uuid()}).data) + data=ok(serializer.save().data) ) diff --git a/src/api-engine/channel/models.py b/src/api-engine/channel/models.py index 868977971..031cf8449 100644 --- a/src/api-engine/channel/models.py +++ b/src/api-engine/channel/models.py @@ -8,24 +8,24 @@ class Channel(models.Model): id = models.UUIDField( primary_key=True, - help_text="ID of Channel", + help_text="Channel ID", default=make_uuid, editable=False, unique=True, ) - name = models.CharField(help_text="name of channel", max_length=128) + name = models.CharField(help_text="Channel Name", max_length=128) organizations = models.ManyToManyField( to=Organization, - help_text="the organization of the channel", + help_text="Channel Organizations", related_name="channels", # on_delete=models.SET_NULL ) created_at = models.DateTimeField( - help_text="Create time of Channel", auto_now_add=True + help_text="Channel Creation Timestamp", auto_now_add=True ) orderers = models.ManyToManyField( to=Node, - help_text="Orderer list in the channel", + help_text="Channel Orderers", ) class Meta: diff --git 
a/src/api-engine/channel/serializers.py b/src/api-engine/channel/serializers.py index 1824c6892..cc72c9eb6 100644 --- a/src/api-engine/channel/serializers.py +++ b/src/api-engine/channel/serializers.py @@ -7,15 +7,16 @@ from common.serializers import ListResponseSerializer from node.models import Node from node.service import get_node +from organization.serializeres import OrganizationID -class ChannelID(serializers.Serializer): - id = serializers.UUIDField(help_text="Channel ID") - +class ChannelID(serializers.ModelSerializer): + class Meta: + model = Channel + fields = ("id",) -class ChannelResponse( - ChannelID, serializers.ModelSerializer -): +class ChannelResponse(serializers.ModelSerializer): + organizations = OrganizationID(many=True) class Meta: model = Channel fields = ( diff --git a/src/api-engine/channel/views.py b/src/api-engine/channel/views.py index 56a1cdc04..8270cf6cf 100644 --- a/src/api-engine/channel/views.py +++ b/src/api-engine/channel/views.py @@ -29,13 +29,12 @@ class ChannelViewSet(viewsets.ViewSet): def list(self, request): serializer = PageQuerySerializer(data=request.GET) p = serializer.get_paginator(Channel.objects.filter(organizations__id__contains=request.user.organization.id)) - response = ChannelList({ - "total": p.count, - "data": ChannelResponse(p.page(serializer.data["page"]).object_list, many=True).data, - }) return Response( status=status.HTTP_200_OK, - data=ok(response.data), + data=ChannelList({ + "total": p.count, + "data": ChannelResponse(p.page(serializer.data["page"]).object_list, many=True).data, + }).data, ) @swagger_auto_schema( diff --git a/src/api-engine/node/serializers.py b/src/api-engine/node/serializers.py index 09de1ad8f..5fe4c818b 100644 --- a/src/api-engine/node/serializers.py +++ b/src/api-engine/node/serializers.py @@ -6,11 +6,11 @@ from node.models import Node -class NodeIDSerializer(serializers.Serializer): +class NodeID(serializers.Serializer): id = serializers.UUIDField(help_text="ID of node") -class 
NodeResponseSerializer(NodeIDSerializer, serializers.ModelSerializer): +class NodeResponse(NodeID, serializers.ModelSerializer): class Meta: model = Node fields = ( @@ -22,8 +22,8 @@ class Meta: ) -class NodeListSerializer(ListResponseSerializer): - data = NodeResponseSerializer(many=True, help_text="Node list") +class NodeList(ListResponseSerializer): + data = NodeResponse(many=True, help_text="Node list") class NodeCreateBody(serializers.ModelSerializer): diff --git a/src/api-engine/node/views.py b/src/api-engine/node/views.py index d8483700f..f14167fab 100644 --- a/src/api-engine/node/views.py +++ b/src/api-engine/node/views.py @@ -8,7 +8,7 @@ from api.utils.common import with_common_response from common.serializers import PageQuerySerializer from node.models import Node -from node.serializers import NodeListSerializer, NodeCreateBody, NodeIDSerializer, NodeResponseSerializer +from node.serializers import NodeList, NodeCreateBody, NodeID, NodeResponse class NodeViewSet(viewsets.ViewSet): @@ -20,15 +20,15 @@ class NodeViewSet(viewsets.ViewSet): operation_summary="List all nodes of the current organization", query_serializer=PageQuerySerializer(), responses=with_common_response( - {status.HTTP_200_OK: make_response_serializer(NodeListSerializer)} + {status.HTTP_200_OK: make_response_serializer(NodeList)} ), ) def list(self, request): serializer = PageQuerySerializer(data=request.GET) p = serializer.get_paginator(Node.objects.filter(organization=request.user.organization)) - response = NodeListSerializer({ + response = NodeList({ "total": p.count, - "data": NodeResponseSerializer(p.page(serializer.data['page']).object_list, many=True).data + "data": NodeResponse(p.page(serializer.data['page']).object_list, many=True).data }) return Response( status=status.HTTP_200_OK, @@ -39,13 +39,13 @@ def list(self, request): operation_summary="Create a new node of the current organization", request_body=NodeCreateBody, responses=with_common_response( - {status.HTTP_201_CREATED: 
make_response_serializer(NodeIDSerializer)} + {status.HTTP_201_CREATED: make_response_serializer(NodeID)} ), ) def create(self, request): serializer = NodeCreateBody(data=request.data, context={"organization": request.user.organization}) serializer.is_valid(raise_exception=True) - response = NodeIDSerializer(serializer.save().__dict__) + response = NodeID(serializer.save().__dict__) return Response( status=status.HTTP_201_CREATED, data=ok(response.data), diff --git a/src/api-engine/organization/serializeres.py b/src/api-engine/organization/serializeres.py index edea79e64..17780fc30 100644 --- a/src/api-engine/organization/serializeres.py +++ b/src/api-engine/organization/serializeres.py @@ -1,23 +1,24 @@ from rest_framework import serializers from api.common.serializers import ListResponseSerializer +from organization.models import Organization -class OrganizationResponse(serializers.Serializer): - id = serializers.CharField(help_text="Organization ID") - name = serializers.CharField(help_text="Organization Name") - created_at = serializers.DateTimeField(help_text="Organization Creation Timestamp") +class OrganizationID(serializers.ModelSerializer): + class Meta: + model = Organization + fields = ("id",) + +class OrganizationResponse(serializers.ModelSerializer): class Meta: - fields = ("id", "name", "created_at") - extra_kwargs = { - "name": {"required": True}, - "created_at": {"required": True, "read_only": True}, - "id": {"required": True, "read_only": True}, - } + model = Organization + fields = ( + "id", + "name", + "created_at" + ) class OrganizationList(ListResponseSerializer): data = OrganizationResponse(many=True, help_text="Organizations list") - - diff --git a/src/api-engine/user/serializers.py b/src/api-engine/user/serializers.py index de1af95d0..e901bc80d 100644 --- a/src/api-engine/user/serializers.py +++ b/src/api-engine/user/serializers.py @@ -32,11 +32,11 @@ def create(self, validated_data: Dict[str, Any]) -> UserProfile: return user -class 
UserIDSerializer(serializers.Serializer): +class UserID(serializers.Serializer): id = serializers.UUIDField(help_text="User ID") -class UserInfoSerializer(UserIDSerializer, serializers.Serializer): +class UserInfo(UserID, serializers.Serializer): email = serializers.EmailField(help_text="User Email") role = serializers.CharField(help_text="User Role") organization = OrganizationResponse(help_text="User Organization") @@ -46,11 +46,11 @@ class Meta: fields = ("id", "email", "role", "organization", "created_at") -class UserListSerializer(ListResponseSerializer): - data = UserInfoSerializer(many=True, help_text="Users list") +class UserList(ListResponseSerializer): + data = UserInfo(many=True, help_text="Users list") -class UserPasswordUpdateSerializer(serializers.Serializer): +class UserPasswordUpdate(serializers.Serializer): password = serializers.CharField( help_text="New password for login", max_length=64 ) diff --git a/src/api-engine/user/views.py b/src/api-engine/user/views.py index 6c767cbf7..9f26dba34 100644 --- a/src/api-engine/user/views.py +++ b/src/api-engine/user/views.py @@ -20,9 +20,9 @@ from common.serializers import PageQuerySerializer from user.serializers import ( UserCreateBody, - UserIDSerializer, - UserListSerializer, - UserPasswordUpdateSerializer, UserInfoSerializer, + UserID, + UserList, + UserPasswordUpdate, UserInfo, ) from api.utils.common import with_common_response from user.models import UserProfile @@ -37,17 +37,17 @@ class UserViewSet(viewsets.ViewSet): operation_summary="List users in the current organization", query_serializer=PageQuerySerializer(), responses=with_common_response( - {status.HTTP_200_OK: make_response_serializer(UserListSerializer)} + {status.HTTP_200_OK: make_response_serializer(UserList)} ), ) def list(self, request: Request) -> Response: serializer = PageQuerySerializer(data=request.GET) p = serializer.get_paginator(UserProfile.objects.filter(organization=request.user.organization)) - response = UserListSerializer( 
+ response = UserList( data = { "total": p.count, - "data": UserInfoSerializer( + "data": UserInfo( p.page(serializer.data['page']).object_list, many=True ).data, @@ -62,13 +62,13 @@ def list(self, request: Request) -> Response: operation_summary="Create a user in the current organization", request_body=UserCreateBody, responses=with_common_response( - {status.HTTP_201_CREATED: make_response_serializer(UserIDSerializer)} + {status.HTTP_201_CREATED: make_response_serializer(UserID)} ), ) def create(self, request: Request) -> Response: serializer = UserCreateBody(data=request.data, context={"organization": request.user.organization}) serializer.is_valid(raise_exception=True) - response = UserIDSerializer(data={"id": serializer.save().id}) + response = UserID(data={"id": serializer.save().id}) response.is_valid(raise_exception=True) return Response( status = status.HTTP_201_CREATED, @@ -91,7 +91,7 @@ def destroy(self, request: Request, pk: Optional[str] = None) -> Response: @swagger_auto_schema( method="PUT", operation_summary="Update the current user's password", - request_body=UserPasswordUpdateSerializer, + request_body=UserPasswordUpdate, responses=with_common_response({status.HTTP_204_NO_CONTENT: "No Content"}), ) @action( @@ -100,7 +100,7 @@ def destroy(self, request: Request, pk: Optional[str] = None) -> Response: url_path="password", ) def password(self, request: Request) -> Response: - serializer = UserPasswordUpdateSerializer(data=request.data, context={"request": request}) + serializer = UserPasswordUpdate(data=request.data, context={"request": request}) serializer.is_valid(raise_exception=True) serializer.save() return Response(status = status.HTTP_204_NO_CONTENT) @@ -108,7 +108,7 @@ def password(self, request: Request) -> Response: @swagger_auto_schema( method="GET", operation_summary="Get the current user", - responses=with_common_response({status.HTTP_200_OK: make_response_serializer(UserInfoSerializer)}), + 
responses=with_common_response({status.HTTP_200_OK: make_response_serializer(UserInfo)}), ) @action( methods=["GET"], @@ -118,5 +118,5 @@ def password(self, request: Request) -> Response: def profile(self, request: Request) -> Response: return Response( status = status.HTTP_200_OK, - data=ok(UserInfoSerializer(request.user).data), + data=ok(UserInfo(request.user).data), ) From 715ed1f2d97ff534a49dffccea2f714959aad867 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 08:34:31 +0800 Subject: [PATCH 06/67] Finish chaincode commit Signed-off-by: dodo920306 --- src/api-engine/api_engine/settings.py | 6 + src/api-engine/chaincode/models.py | 22 ++++ src/api-engine/chaincode/serializers.py | 19 ++- src/api-engine/chaincode/service.py | 165 +++++++++++++++++++++++- src/api-engine/chaincode/views.py | 2 +- src/api-engine/channel/serializers.py | 2 +- src/api-engine/channel/service.py | 23 ++-- src/api-engine/node/service.py | 5 +- src/api-engine/user/serializers.py | 25 ++-- src/api-engine/user/views.py | 1 - 10 files changed, 236 insertions(+), 34 deletions(-) diff --git a/src/api-engine/api_engine/settings.py b/src/api-engine/api_engine/settings.py index 59162c09c..50003aced 100644 --- a/src/api-engine/api_engine/settings.py +++ b/src/api-engine/api_engine/settings.py @@ -209,6 +209,12 @@ "propagate": False, }, "api": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, + "auth": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, + "user": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, + "organization": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, + "node": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, + "channel": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, + "chaincode": {"handlers": ["console"], "level": "DEBUG", "propagate": False}, }, } diff --git a/src/api-engine/chaincode/models.py b/src/api-engine/chaincode/models.py index 2010af276..f2879c4d6 
100644 --- a/src/api-engine/chaincode/models.py +++ b/src/api-engine/chaincode/models.py @@ -1,5 +1,6 @@ import os.path +from django.core.validators import MinValueValidator from django.db import models from channel.models import Channel @@ -36,6 +37,18 @@ class Status(models.TextChoices): help_text="Chaincode Package", upload_to=get_package_path, ) + name = models.CharField( + help_text="Chaincode Name", + max_length=128, + ) + version = models.CharField( + help_text="Chaincode Version", + max_length=128, + ) + sequence = models.IntegerField( + help_text="Chaincode Sequence", + validators=[MinValueValidator(1)], + ) label = models.CharField( help_text="Chaincode Label", max_length=128, @@ -56,6 +69,15 @@ class Status(models.TextChoices): help_text="Chaincode Language", max_length=128, ) + init_required = models.BooleanField( + help_text="Whether Chaincode Initialization Required", + default=False, + ) + signature_policy = models.CharField( + help_text="Chaincode Signature Policy", + null=True, + blank=True, + ) status = models.CharField( help_text="Chaincode Status", choices=Status.choices, diff --git a/src/api-engine/chaincode/serializers.py b/src/api-engine/chaincode/serializers.py index f36b4a46b..07ce6a74b 100644 --- a/src/api-engine/chaincode/serializers.py +++ b/src/api-engine/chaincode/serializers.py @@ -1,9 +1,9 @@ import tarfile +from django.core.validators import MinValueValidator from rest_framework import serializers - from chaincode.models import Chaincode -from chaincode.service import create_chaincode, get_metadata_label +from chaincode.service import create_chaincode, get_metadata from channel.models import Channel from channel.serializers import ChannelID from common.serializers import ListResponseSerializer @@ -49,11 +49,23 @@ class ChaincodeCreateBody(serializers.ModelSerializer): class Meta: model = Chaincode fields = ( + "name", + "version", + "sequence", + "init_required", + "signature_policy", "package", "channel", "peers", "description", ) 
+ extra_kwargs = { + "sequence": { + "validators": [MinValueValidator(1)] + }, + "init_required": {"required": False}, + "signature_policy": {"required": False}, + } @staticmethod def validate_package(value): @@ -67,7 +79,7 @@ def validate_package(value): ) try: - metadata = get_metadata_label(value) + metadata = get_metadata(value) if metadata is None: raise serializers.ValidationError("Metadata not found.") except tarfile.TarError: @@ -82,4 +94,5 @@ def validate_channel(self, value: Channel): def create(self, validated_data) -> ChaincodeID: validated_data["user"] = self.context["user"] + validated_data["organization"] = self.context["organization"] return ChaincodeID({"id": create_chaincode(**validated_data).id}) diff --git a/src/api-engine/chaincode/service.py b/src/api-engine/chaincode/service.py index 399086bfd..649a9bea1 100644 --- a/src/api-engine/chaincode/service.py +++ b/src/api-engine/chaincode/service.py @@ -1,26 +1,183 @@ import json +import logging +import os +import subprocess import tarfile -from typing import Optional, List +from typing import Optional, List, Any, Dict +from django.db import transaction + +from api_engine.settings import FABRIC_TOOL from chaincode.models import Chaincode from channel.models import Channel from node.models import Node +from node.service import get_domain_name, get_peer_directory, get_org_directory, get_orderer_directory +from organization.models import Organization from user.models import UserProfile +LOG = logging.getLogger(__name__) + +def create_chaincode( + name: str, + version: str, + sequence: int, + package, + channel: Channel, + user: UserProfile, + organization: Organization, + peers: List[Node], + description: str, + init_required: bool = False, + signature_policy: str = None) -> Chaincode: + peer_command = os.path.join(FABRIC_TOOL, "peer") + metadata = get_metadata(package) -def create_chaincode(package, channel: Channel, user: UserProfile, peers: List[Node], description: str) -> Chaincode: chaincode = 
Chaincode( + name=name, + version=version, + sequence=sequence, + label=metadata["label"], + language=metadata["type"], package=package, + init_required=init_required, + signature_policy=signature_policy, channel=channel, creator=user, description=description, ) chaincode.save() chaincode.peers.add(*peers) + + peer_organization_name = organization.name.split(".", 1)[0].capitalize() + peer_msp = "{}MSP".format(peer_organization_name) + peer_root_certs = [] + peer_addresses = [] + peer_envs = [] + for peer_domain_name in [get_domain_name(organization.name, Node.Type.PEER, peer.name) for peer in peers]: + peer_dir = get_peer_directory(organization.name, peer_domain_name) + peer_root_cert = os.path.join(peer_dir, "tls/ca.crt") + peer_address = "{}:7051".format(peer_domain_name) + peer_envs.append({ + "CORE_PEER_TLS_ENABLED": "true", + "CORE_PEER_LOCALMSPID": peer_msp, + "CORE_PEER_TLS_ROOTCERT_FILE": peer_root_cert, + "CORE_PEER_MSPCONFIGPATH": "{}/users/Admin@{}/msp".format( + get_org_directory(organization.name, Node.Type.PEER), + organization.name + ), + "CORE_PEER_ADDRESS": peer_address, + "FABRIC_CFG_PATH": peer_dir, + }) + peer_root_certs.append(peer_root_cert) + peer_addresses.append(peer_address) + + command = [ + peer_command, + "lifecycle", + "chaincode", + "calculatepackageid", + chaincode.package.path + ] + LOG.info(" ".join(command)) + with transaction.atomic(): + chaincode.package_id = subprocess.run( + command, + env=peer_envs[0], + check=True, + capture_output=True, + text=True + ).stdout + + chaincode.save() + + chaincode = Chaincode.objects.get(name=name) + + command = [ + peer_command, + "lifecycle", + "chaincode", + "install", + chaincode.package.path, + ] + LOG.info(" ".join(command)) + for peer_env in peer_envs: + subprocess.run( + command, + env=peer_env, + check=True) + + with transaction.atomic(): + chaincode.status = Chaincode.Status.INSTALLED + chaincode.save() + + orderer_domain_name = get_domain_name( + organization.name, + 
Node.Type.ORDERER, + Node.objects.filter(organization=organization).first().name + ) + command = [ + peer_command, + "lifecycle", + "chaincode", + "approveformyorg", + "-o", + "{}:7050".format(orderer_domain_name), + "--ordererTLSHostnameOverride", + orderer_domain_name, + "--channelID", + channel.name, + "--name", + chaincode.name, + "--version", + chaincode.version, + "--package-id", + chaincode.package_id, + "--sequence", + str(chaincode.sequence), + "--tls", + "--cafile", + "{}/msp/tlscacerts/tlsca.{}-cert.pem".format( + get_orderer_directory(organization.name, orderer_domain_name), + organization.name.split(".", 1)[1], + ) + ] + if chaincode.init_required: + command.append("--init-required") + if chaincode.signature_policy and chaincode.signature_policy.strip(): + command.extend(["--signature-policy", chaincode.signature_policy]) + + LOG.info(" ".join(command)) + for peer_env in peer_envs: + subprocess.run( + command, + env=peer_env, + check=True) + + with transaction.atomic(): + chaincode.status = Chaincode.Status.APPROVED + chaincode.save() + + command[3] = "commit" + # Remove package ID + del command[14:16] + for i in range(len(peers)): + command.extend(["--peerAddresses", peer_addresses[i], "--tlsRootCertFiles", peer_root_certs[i]]) + + LOG.info(" ".join(command)) + for peer_env in peer_envs: + subprocess.run( + command, + env=peer_env, + check=True) + + with transaction.atomic(): + chaincode.status = Chaincode.Status.COMMITTED + chaincode.save() + return chaincode -def get_metadata_label(file) -> Optional[str]: +def get_metadata(file) -> Optional[Dict[str, Any]]: file.seek(0) res = None with tarfile.open(fileobj=file, mode='r:gz') as tar: @@ -30,7 +187,7 @@ def get_metadata_label(file) -> Optional[str]: tar.extractfile(member) .read() .decode("utf-8") - ).get("label") + ) break file.seek(0) return res diff --git a/src/api-engine/chaincode/views.py b/src/api-engine/chaincode/views.py index f42841d0f..d48fb90bc 100644 --- a/src/api-engine/chaincode/views.py 
+++ b/src/api-engine/chaincode/views.py @@ -49,7 +49,7 @@ def list(self, request): ) @swagger_auto_schema( - operation_summary="Create a chaincode of the current organization", + operation_summary="Create (Install, Approve, and Commit) a chaincode of the current organization", request_body=ChaincodeCreateBody(), responses=with_common_response( {status.HTTP_201_CREATED: make_response_serializer(ChaincodeID)} diff --git a/src/api-engine/channel/serializers.py b/src/api-engine/channel/serializers.py index cc72c9eb6..2955d7b64 100644 --- a/src/api-engine/channel/serializers.py +++ b/src/api-engine/channel/serializers.py @@ -66,7 +66,7 @@ def validate_orderer_ids(value): node = get_node(orderer_id) if node is None: raise serializers.ValidationError("Orderer {} not found.".format(orderer_id)) - if node.type != Node.Type.PEER: + if node.type != Node.Type.ORDERER: raise serializers.ValidationError( "Node {} is not an orderer but {} instead.".format(orderer_id, node.type)) if node.status != Node.Status.RUNNING: diff --git a/src/api-engine/channel/service.py b/src/api-engine/channel/service.py index 9a2a5b49f..7d91af98c 100644 --- a/src/api-engine/channel/service.py +++ b/src/api-engine/channel/service.py @@ -185,20 +185,19 @@ def create( ] LOG.info(" ".join(command)) peer_dir = get_peer_directory(channel_organization.name, peer_domain_name) - env = { - "CORE_PEER_TLS_ENABLED": "true", - "CORE_PEER_LOCALMSPID": peer_msp, - "CORE_PEER_TLS_ROOTCERT_FILE": "{}/tls/ca.crt".format(peer_dir), - "CORE_PEER_MSPCONFIGPATH": "{}/users/Admin@{}/msp".format( - get_org_directory(channel_organization.name, Node.Type.PEER), - channel_organization.name - ), - "CORE_PEER_ADDRESS": "{}:7051".format(peer_domain_name), - "FABRIC_CFG_PATH": peer_dir, - } subprocess.run( command, - env=env, + env={ + "CORE_PEER_TLS_ENABLED": "true", + "CORE_PEER_LOCALMSPID": peer_msp, + "CORE_PEER_TLS_ROOTCERT_FILE": "{}/tls/ca.crt".format(peer_dir), + "CORE_PEER_MSPCONFIGPATH": "{}/users/Admin@{}/msp".format( + 
get_org_directory(channel_organization.name, Node.Type.PEER), + channel_organization.name + ), + "CORE_PEER_ADDRESS": "{}:7051".format(peer_domain_name), + "FABRIC_CFG_PATH": peer_dir, + }, check=True) command = [ diff --git a/src/api-engine/node/service.py b/src/api-engine/node/service.py index 2e42a030c..ad07a81a4 100644 --- a/src/api-engine/node/service.py +++ b/src/api-engine/node/service.py @@ -47,8 +47,9 @@ def create(organization: Organization, node_type: Node.Type, node_name: str) -> stdin_open=True, network="cello-net", name=node_domain_name, - dns_search=["."], - # volumes=volumes, + volumes=[ + "/var/run/docker.sock:/host/var/run/docker.sock" + ], environment=_get_node_env(node_type, node_domain_name, msp, tls, cfg), # ports=port_map, ) diff --git a/src/api-engine/user/serializers.py b/src/api-engine/user/serializers.py index e901bc80d..de67275e7 100644 --- a/src/api-engine/user/serializers.py +++ b/src/api-engine/user/serializers.py @@ -5,7 +5,7 @@ from rest_framework import serializers from api.common.serializers import ListResponseSerializer -from organization.serializeres import OrganizationResponse +from organization.serializeres import OrganizationID from user.models import UserProfile @@ -32,18 +32,23 @@ def create(self, validated_data: Dict[str, Any]) -> UserProfile: return user -class UserID(serializers.Serializer): - id = serializers.UUIDField(help_text="User ID") - +class UserID(serializers.ModelSerializer): + class Meta: + model = UserProfile + fields = ("id",) -class UserInfo(UserID, serializers.Serializer): - email = serializers.EmailField(help_text="User Email") - role = serializers.CharField(help_text="User Role") - organization = OrganizationResponse(help_text="User Organization") - created_at = serializers.DateTimeField(help_text="User Creation Timestamp") +class UserInfo(serializers.ModelSerializer): + organization = OrganizationID() class Meta: - fields = ("id", "email", "role", "organization", "created_at") + model = UserProfile + 
fields = ( + "id", + "email", + "role", + "organization", + "created_at" + ) class UserList(ListResponseSerializer): diff --git a/src/api-engine/user/views.py b/src/api-engine/user/views.py index 9f26dba34..586ef9bbc 100644 --- a/src/api-engine/user/views.py +++ b/src/api-engine/user/views.py @@ -5,7 +5,6 @@ import logging from typing import Optional -from django.core.paginator import Paginator from drf_yasg.utils import swagger_auto_schema from rest_framework import viewsets, status from rest_framework.decorators import action From 9222ed807d22fa23cb47527755a5f7920a6edc21 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 08:52:34 +0800 Subject: [PATCH 07/67] Remove redundant old API files Signed-off-by: dodo920306 --- src/api-engine/api/common/enums.py | 96 --- src/api-engine/api/management/__init__.py | 0 .../api/management/commands/__init__.py | 0 .../api/management/commands/create_user.py | 58 -- src/api-engine/api/models.py | 51 -- src/api-engine/api/routes/__init__.py | 3 - .../api/routes/chaincode/__init__.py | 0 .../api/routes/chaincode/serializers.py | 86 -- src/api-engine/api/routes/chaincode/views.py | 751 ------------------ src/api-engine/api/utils/__init__.py | 3 +- src/api-engine/api/utils/common.py | 274 ------- src/api-engine/common/serializers.py | 1 + 12 files changed, 2 insertions(+), 1321 deletions(-) delete mode 100644 src/api-engine/api/management/__init__.py delete mode 100644 src/api-engine/api/management/commands/__init__.py delete mode 100644 src/api-engine/api/management/commands/create_user.py delete mode 100644 src/api-engine/api/models.py delete mode 100644 src/api-engine/api/routes/__init__.py delete mode 100644 src/api-engine/api/routes/chaincode/__init__.py delete mode 100644 src/api-engine/api/routes/chaincode/serializers.py delete mode 100644 src/api-engine/api/routes/chaincode/views.py diff --git a/src/api-engine/api/common/enums.py b/src/api-engine/api/common/enums.py index 36d150600..d500c7bde 100644 --- 
a/src/api-engine/api/common/enums.py +++ b/src/api-engine/api/common/enums.py @@ -69,102 +69,6 @@ def names(cls): return [name.lower() for name, _ in cls.__members__.items()] -@unique -class HostStatus(ExtraEnum): - Inactive = 0 - Active = 1 - - -@unique -class NetworkStatus(ExtraEnum): - Stopped = 0 - Running = 1 - Error = 2 - - -@unique -class LogLevel(ExtraEnum): - Info = 0 - Warning = 1 - Debug = 2 - Error = 3 - Critical = 4 - -@unique -class NetworkType(ExtraEnum): - Fabric = "fabric" - - -@unique -class FabricCAServerType(ExtraEnum): - # every company only can create one TLS type ca server - TLS = "tls" - Signature = "signature" - - -@unique -class FabricVersions(ExtraEnum): - V1_4 = "1.4.2" - V1_5 = "1.5" - - -@unique -class FabricCAUserType(ExtraEnum): - Peer = "peer" - Orderer = "orderer" - User = "user" - - -@unique -class FabricCAUserStatus(ExtraEnum): - Registering = "registering" - Registered = "registered" - Fail = "fail" - - -@unique -class NetworkCreateType(ExtraEnum): - New = 0 - Import = 1 - - -@unique -class K8SCredentialType(ExtraEnum): - CertKey = 0 - Config = 1 - UsernamePassword = 2 - - -@unique -class ConsensusPlugin(ExtraEnum): - Solo = 0 - Kafka = 1 - - -@unique -class UserRole(ExtraEnum): - Admin = 0 - Operator = 1 - User = 2 - - -@unique -class FileType(ExtraEnum): - Certificate = 0 - - -@unique -class AgentOperation(ExtraEnum): - Create = "create" - Start = "start" - Stop = "stop" - Query = "query" - Update = "update" - Delete = "delete" - FabricCARegister = "fabric:ca:register" - NewNetwork = "new:network" - - class EnumWithDisplayMeta(EnumMeta): def __new__(mcs, name, bases, attrs): display_strings = attrs.get("DisplayStrings") diff --git a/src/api-engine/api/management/__init__.py b/src/api-engine/api/management/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/api-engine/api/management/commands/__init__.py b/src/api-engine/api/management/commands/__init__.py deleted file mode 100644 index 
e69de29bb..000000000 diff --git a/src/api-engine/api/management/commands/create_user.py b/src/api-engine/api/management/commands/create_user.py deleted file mode 100644 index 4f5a98fbb..000000000 --- a/src/api-engine/api/management/commands/create_user.py +++ /dev/null @@ -1,58 +0,0 @@ -import logging - -from django.core.exceptions import ObjectDoesNotExist -from django.core.management.base import BaseCommand -from api.models import UserProfile - -LOG = logging.getLogger(__name__) - - -class Command(BaseCommand): - help = "Create user" - - def add_arguments(self, parser): - parser.add_argument("--username", help="Username", required=True) - parser.add_argument( - "--is_superuser", action="store_true", required=True - ) - parser.add_argument( - "--password", help="Password of new user", required=True - ) - parser.add_argument("--email", help="Email of new user", required=True) - parser.add_argument("--role", help="role of new user", required=True) - parser.add_argument( - "--force", - help="whether force create user", - required=False, - action="store_true", - ) - - def handle(self, *args, **options): - username = options.get("username") - password = options.get("password") - role = options.get("role") - email = options.get("email") - is_superuser = options.get("is_superuser", False) - force = options.get("force", False) - - try: - user = UserProfile.objects.get(email=email) - except ObjectDoesNotExist: - user = UserProfile( - username=username, - role=role, - email=email, - is_superuser=is_superuser, - ) - user.set_password(password) - user.save() - else: - if force: - user.username = username - user.role = role - user.is_superuser = is_superuser - user.set_password(password) - user.save() - self.stdout.write( - self.style.SUCCESS("Create user successfully %s" % user.id) - ) diff --git a/src/api-engine/api/models.py b/src/api-engine/api/models.py deleted file mode 100644 index 98159cd97..000000000 --- a/src/api-engine/api/models.py +++ /dev/null @@ -1,51 +0,0 @@ -# 
-# SPDX-License-Identifier: Apache-2.0 -# -from django.conf import settings -from django.db import models - -from api.utils.common import make_uuid - -# from user.models import UserProfile - -SUPER_USER_TOKEN = getattr(settings, "ADMIN_TOKEN", "") -MAX_CAPACITY = getattr(settings, "MAX_AGENT_CAPACITY", 100) -MAX_NODE_CAPACITY = getattr(settings, "MAX_NODE_CAPACITY", 600) -MEDIA_ROOT = getattr(settings, "MEDIA_ROOT") -LIMIT_K8S_CONFIG_FILE_MB = 100 -# Limit file upload size less than 100Mb -LIMIT_FILE_MB = 100 -MIN_PORT = 1 -MAX_PORT = 65535 - - -class ChainCode(models.Model): - id = models.UUIDField( - primary_key=True, - help_text="ID of ChainCode", - default=make_uuid, - editable=False, - unique=True, - ) - package_id = models.CharField( - help_text="package_id of chainCode", - max_length=128, - editable=False, - unique=True, - ) - label = models.CharField(help_text="label of chainCode", max_length=128) - creator = models.CharField( - help_text="creator of chainCode", max_length=128 - ) - language = models.CharField( - help_text="language of chainCode", max_length=128 - ) - description = models.CharField( - help_text="description of chainCode", - max_length=128, - blank=True, - null=True, - ) - create_ts = models.DateTimeField( - help_text="Create time of chainCode", auto_now_add=True - ) diff --git a/src/api-engine/api/routes/__init__.py b/src/api-engine/api/routes/__init__.py deleted file mode 100644 index 0480730a5..000000000 --- a/src/api-engine/api/routes/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/src/api-engine/api/routes/chaincode/__init__.py b/src/api-engine/api/routes/chaincode/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/api-engine/api/routes/chaincode/serializers.py b/src/api-engine/api/routes/chaincode/serializers.py deleted file mode 100644 index bfcc977c5..000000000 --- a/src/api-engine/api/routes/chaincode/serializers.py +++ /dev/null @@ -1,86 +0,0 @@ -# -# 
SPDX-License-Identifier: Apache-2.0 -# -from rest_framework import serializers - - -from api.models import ChainCode -from api.common.serializers import ListResponseSerializer - -from api_engine.settings import FABRIC_CHAINCODE_STORE - - -def upload_to(instance, filename): - return "/".join([FABRIC_CHAINCODE_STORE, instance.user_name, filename]) - - -class ChainCodeIDSerializer(serializers.Serializer): - id = serializers.UUIDField(help_text="ChainCode ID") - - -class ChainCodePackageBody(serializers.Serializer): - file = serializers.FileField() - - description = serializers.CharField(max_length=128, required=False) - - def validate(self, attrs): - extension_get = self.extension_for_file(attrs["file"]) - if not extension_get: - raise serializers.ValidationError("unsupported package type") - return super().validate(attrs) - - @staticmethod - def extension_for_file(file): - extension = file.name.endswith(".tar.gz") - return extension - - -class ChainCodeNetworkSerializer(serializers.Serializer): - id = serializers.UUIDField(help_text="Network ID") - name = serializers.CharField(max_length=128, help_text="name of Network") - - -class ChainCodeOrgListSerializer(serializers.Serializer): - id = serializers.UUIDField(help_text="Organization ID") - name = serializers.CharField( - max_length=128, help_text="name of Organization" - ) - - -class ChainCodeResponseSerializer( - ChainCodeIDSerializer, serializers.ModelSerializer -): - id = serializers.UUIDField(help_text="ID of ChainCode") - # network = ChainCodeNetworkSerializer() - # organizations = ChainCodeOrgListSerializer(many=True) - - class Meta: - model = ChainCode - fields = ( - "id", - "package_id", - "label", - "creator", - "language", - "create_ts", - "description", - ) - - -class ChaincodeListResponse(ListResponseSerializer): - data = ChainCodeResponseSerializer(many=True, help_text="ChianCode data") - - -class ChainCodeApproveForMyOrgBody(serializers.Serializer): - channel_name = 
serializers.CharField(max_length=128, required=True) - chaincode_name = serializers.CharField(max_length=128, required=True) - chaincode_version = serializers.CharField(max_length=128, required=True) - sequence = serializers.IntegerField(min_value=1, required=True) - policy = serializers.CharField( - max_length=128, required=False, allow_blank=True - ) - init_flag = serializers.BooleanField(required=False) - - -class ChainCodeCommitBody(ChainCodeApproveForMyOrgBody): - pass diff --git a/src/api-engine/api/routes/chaincode/views.py b/src/api-engine/api/routes/chaincode/views.py deleted file mode 100644 index 7402f2ff4..000000000 --- a/src/api-engine/api/routes/chaincode/views.py +++ /dev/null @@ -1,751 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from rest_framework import viewsets, status -from rest_framework.response import Response -from rest_framework.decorators import action -from rest_framework.permissions import IsAuthenticated -import os -import tempfile -import shutil -import tarfile -import json - -from drf_yasg.utils import swagger_auto_schema -from api.models import ChainCode -from api.utils.common import make_uuid -from django.core.paginator import Paginator - -from api.lib.peer.chaincode import ChainCode as PeerChainCode -from api.common.serializers import PageQuerySerializer -from api.utils.common import with_common_response, init_env_vars -from api.exceptions import ResourceNotFound - -from api.routes.chaincode.serializers import ( - ChainCodePackageBody, - ChainCodeIDSerializer, - ChainCodeCommitBody, - ChainCodeApproveForMyOrgBody, - ChaincodeListResponse, -) -from api.common import ok, err -import threading -import hashlib -import logging - -from api_engine.settings import CELLO_HOME, FABRIC_CHAINCODE_STORE -from channel.models import Channel -from node.models import Node - -LOG = logging.getLogger(__name__) - - -class ChainCodeViewSet(viewsets.ViewSet): - """Class represents Channel related operations.""" - - permission_classes = [ - 
IsAuthenticated, - ] - - def _read_cc_pkg(self, pk, filename, ccpackage_path): - """ - read and extract chaincode package meta info - :pk: chaincode id - :filename: uploaded chaincode package filename - :ccpackage_path: chaincode package path - """ - try: - meta_path = os.path.join(ccpackage_path, "metadata.json") - # extract metadata file - with tarfile.open( - os.path.join(ccpackage_path, filename) - ) as tared_file: - metadata_file = None - for member in tared_file.getmembers(): - if member.name.endswith("metadata.json"): - metadata_file = member - break - - if metadata_file is not None: - # Extract the metadata file - metadata_content = ( - tared_file.extractfile(metadata_file) - .read() - .decode("utf-8") - ) - metadata = json.loads(metadata_content) - language = metadata["type"] - label = metadata["label"] - - if os.path.exists(meta_path): - os.remove(meta_path) - - chaincode = ChainCode.objects.get(id=pk) - chaincode.package_id = chaincode.package_id - chaincode.language = language - chaincode.label = label - chaincode.save() - - except Exception as e: - LOG.exception("Could not read Chaincode Package") - raise e - - @swagger_auto_schema( - query_serializer=PageQuerySerializer, - responses=with_common_response( - {status.HTTP_201_CREATED: ChaincodeListResponse} - ), - ) - def list(self, request): - """ - List Chaincodes - :param request: org_id - :return: chaincode list - :rtype: list - """ - serializer = PageQuerySerializer(data=request.GET) - if serializer.is_valid(raise_exception=True): - page = serializer.validated_data.get("page") - per_page = serializer.validated_data.get("per_page") - - try: - org = request.user.organization - chaincodes = ChainCode.objects.filter( - creator=org.name - ).order_by("create_ts") - p = Paginator(chaincodes, per_page) - chaincodes_pages = p.page(page) - chanincodes_list = [ - { - "id": chaincode.id, - "package_id": chaincode.package_id, - "label": chaincode.label, - "creator": chaincode.creator, - "language": 
chaincode.language, - "create_ts": chaincode.create_ts, - "description": chaincode.description, - } - for chaincode in chaincodes_pages - ] - response = ChaincodeListResponse( - {"data": chanincodes_list, "total": chaincodes.count()} - ) - return Response( - data=ok(response.data), status=status.HTTP_200_OK - ) - except Exception as e: - return Response( - err(e.args), status=status.HTTP_400_BAD_REQUEST - ) - - @swagger_auto_schema( - method="post", - query_serializer=PageQuerySerializer, - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["post"], url_path="chaincodeRepo") - def package(self, request): - serializer = ChainCodePackageBody(data=request.data) - if serializer.is_valid(raise_exception=True): - file = serializer.validated_data.get("file") - description = serializer.validated_data.get("description") - uuid = make_uuid() - try: - fd, temp_cc_path = tempfile.mkstemp() - # try to calculate packageid - with open(fd, "wb") as f: - for chunk in file.chunks(): - f.write(chunk) - - with tarfile.open(temp_cc_path, "r:gz") as tar: - # Locate the metadata file - metadata_file = None - for member in tar.getmembers(): - if member.name.endswith("metadata.json"): - metadata_file = member - break - - if metadata_file is not None: - # Extract the metadata file - metadata_content = ( - tar.extractfile(metadata_file) - .read() - .decode("utf-8") - ) - metadata = json.loads(metadata_content) - label = metadata.get("label") - else: - return Response( - err( - "Metadata file not found in the chaincode package." 
- ), - status=status.HTTP_400_BAD_REQUEST, - ) - - org = request.user.organization - # qs = Node.objects.filter(type="peer", organization=org) - # if not qs.exists(): - # return Response( - # err("at least 1 peer node is required for the chaincode package upload."), - # status=status.HTTP_400_BAD_REQUEST - # ) - # peer_node = qs.first() - # envs = init_env_vars(peer_node, org) - # peer_channel_cli = PeerChainCode("v2.5.10", **envs) - # return_code, content = peer_channel_cli.lifecycle_calculatepackageid(temp_cc_path) - # if (return_code != 0): - # return Response( - # err("calculate packageid failed for {}.".format(content)), - # status=status.HTTP_400_BAD_REQUEST - # ) - # packageid = content.strip() - - # manually calculate the package id - sha256_hash = hashlib.sha256() - with open(temp_cc_path, "rb") as f: - for byte_block in iter(lambda: f.read(4096), b""): - sha256_hash.update(byte_block) - packageid = label + ":" + sha256_hash.hexdigest() - - # check if packageid exists - cc = ChainCode.objects.filter(package_id=packageid) - if cc.exists(): - return Response( - err( - "package with id {} already exists.".format( - packageid - ) - ), - status=status.HTTP_400_BAD_REQUEST, - ) - - chaincode = ChainCode( - id=uuid, - package_id=packageid, - creator=org.name, - description=description, - ) - chaincode.save() - - # save chaincode package locally - ccpackage_path = os.path.join( - FABRIC_CHAINCODE_STORE, packageid - ) - if not os.path.exists(ccpackage_path): - os.makedirs(ccpackage_path) - ccpackage = os.path.join(ccpackage_path, file.name) - shutil.copy(temp_cc_path, ccpackage) - - # start thread to read package meta info, update db - try: - threading.Thread( - target=self._read_cc_pkg, - args=(uuid, file.name, ccpackage_path), - ).start() - except Exception as e: - LOG.exception("Failed Threading") - raise e - - return Response(ok("success"), status=status.HTTP_200_OK) - except Exception as e: - return Response( - err(e.args), status=status.HTTP_400_BAD_REQUEST - 
) - finally: - os.remove(temp_cc_path) - - @swagger_auto_schema( - method="post", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["post"]) - def install(self, request): - chaincode_id = request.data.get("id") - # Get the selected node ID from request - node_id = request.data.get("node") - try: - cc_targz = "" - file_path = os.path.join(FABRIC_CHAINCODE_STORE, chaincode_id) - for _, _, files in os.walk(file_path): - cc_targz = os.path.join(file_path + "/" + files[0]) - break - - org = request.user.organization - - # If node_id is provided, get that specific node - if node_id: - try: - peer_node = Node.objects.get( - id=node_id, type="peer", organization=org - ) - except Node.DoesNotExist: - return Response( - err("Selected peer node not found or not authorized."), - status=status.HTTP_404_NOT_FOUND, - ) - else: - # Fallback to first peer if no node selected - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound - peer_node = qs.first() - - envs = init_env_vars(peer_node, org) - peer_channel_cli = PeerChainCode(**envs) - res = peer_channel_cli.lifecycle_install(cc_targz) - if res != 0: - return Response( - err("install chaincode failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - except Exception as e: - return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST) - return Response(ok("success"), status=status.HTTP_200_OK) - - @swagger_auto_schema( - method="get", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["get"]) - def query_installed(self, request): - try: - org = request.user.organization - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - peer_node = qs.first() - envs = init_env_vars(peer_node, org) - - timeout = "5s" - peer_channel_cli = PeerChainCode(**envs) - res, 
installed_chaincodes = ( - peer_channel_cli.lifecycle_query_installed(timeout) - ) - if res != 0: - return Response( - err("query installed chaincode failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - except Exception as e: - return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST) - return Response(ok(installed_chaincodes), status=status.HTTP_200_OK) - - @swagger_auto_schema( - method="get", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["get"]) - def get_installed_package(self, request): - try: - org = request.user.organization - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - peer_node = qs.first() - envs = init_env_vars(peer_node, org) - - timeout = "5s" - peer_channel_cli = PeerChainCode(**envs) - res = peer_channel_cli.lifecycle_get_installed_package(timeout) - if res != 0: - return Response( - err("get installed package failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - - except Exception as e: - return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST) - return Response(ok("success"), status=status.HTTP_200_OK) - - @swagger_auto_schema( - method="post", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["post"]) - def approve_for_my_org(self, request): - serializer = ChainCodeApproveForMyOrgBody(data=request.data) - if serializer.is_valid(raise_exception=True): - try: - channel_name = serializer.validated_data.get("channel_name") - chaincode_name = serializer.validated_data.get( - "chaincode_name" - ) - chaincode_version = serializer.validated_data.get( - "chaincode_version" - ) - policy = serializer.validated_data.get("policy", "") - sequence = serializer.validated_data.get("sequence") - init_flag = serializer.validated_data.get("init_flag", False) - - org = request.user.organization - qs = 
Node.objects.filter(type="orderer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Orderer Does Not Exist") - orderer_node = qs.first() - orderer_url = ( - orderer_node.name - + "." - + org.name.split(".", 1)[1] - + ":" - + str(7050) - ) - - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - peer_node = qs.first() - envs = init_env_vars(peer_node, org) - - peer_channel_cli = PeerChainCode(**envs) - code, content = peer_channel_cli.lifecycle_approve_for_my_org( - orderer_url, - channel_name, - chaincode_name, - chaincode_version, - sequence, - policy, - init_flag, - ) - if code != 0: - return Response( - err( - " lifecycle_approve_for_my_org failed. err: " - + content - ), - status=status.HTTP_400_BAD_REQUEST, - ) - except Exception as e: - return Response( - err(e.args), status=status.HTTP_400_BAD_REQUEST - ) - return Response(ok("success"), status=status.HTTP_200_OK) - - @swagger_auto_schema( - method="get", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["get"]) - def query_approved(self, request): - try: - org = request.user.organization - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - peer_node = qs.first() - envs = init_env_vars(peer_node, org) - - channel_name = request.data.get("channel_name") - cc_name = request.data.get("chaincode_name") - - peer_channel_cli = PeerChainCode(**envs) - code, content = peer_channel_cli.lifecycle_query_approved( - channel_name, cc_name - ) - if code != 0: - return Response( - err("query_approved failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - - except Exception as e: - return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST) - return Response(ok(content), status=status.HTTP_200_OK) - - @swagger_auto_schema( - method="post", - responses=with_common_response( - 
{status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["post"]) - def check_commit_readiness(self, request): - serializer = ChainCodeApproveForMyOrgBody(data=request.data) - if serializer.is_valid(raise_exception=True): - try: - channel_name = serializer.validated_data.get("channel_name") - chaincode_name = serializer.validated_data.get( - "chaincode_name" - ) - chaincode_version = serializer.validated_data.get( - "chaincode_version" - ) - policy = serializer.validated_data.get("policy") - # Perhaps the orderer's port is best stored in the database - orderer_url = serializer.validated_data.get("orderer_url") - sequence = serializer.validated_data.get("sequence") - org = request.user.organization - qs = Node.objects.filter(type="orderer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Orderer Does Not Exist") - orderer_node = qs.first() - - orderer_tls_dir = "{}/{}/crypto-config/ordererOrganizations/{}/orderers/{}/msp/tlscacerts".format( - CELLO_HOME, - org.name, - org.name.split(".", 1)[1], - orderer_node.name + "." 
+ org.name.split(".", 1)[1], - ) - - orderer_tls_root_cert = "" - for _, _, files in os.walk(orderer_tls_dir): - orderer_tls_root_cert = orderer_tls_dir + "/" + files[0] - break - - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - peer_node = qs.first() - envs = init_env_vars(peer_node, org) - - peer_channel_cli = PeerChainCode(**envs) - code, content = ( - peer_channel_cli.lifecycle_check_commit_readiness( - orderer_url, - orderer_tls_root_cert, - channel_name, - chaincode_name, - chaincode_version, - policy, - sequence, - ) - ) - if code != 0: - return Response( - err("check_commit_readiness failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - - except Exception as e: - return Response( - err(e.args), status=status.HTTP_400_BAD_REQUEST - ) - return Response(ok(content), status=status.HTTP_200_OK) - - def _get_orderer_url(self, org): - qs = Node.objects.filter(type="orderer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Orderer Does Not Exist") - return ( - qs.first().name + "." 
+ org.name.split(".", 1)[1] + ":" + str(7050) - ) - - def _get_peer_channel_cli(self, org): - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - envs = init_env_vars(qs.first(), org) - return PeerChainCode(**envs) - - def _get_approved_organizations_by_channel_and_chaincode( - self, - peer_channel_cli, - channel_name, - chaincode_name, - chaincode_version, - sequence, - ): - code, readiness_result = ( - peer_channel_cli.lifecycle_check_commit_readiness( - channel_name, chaincode_name, chaincode_version, sequence - ) - ) - if code != 0: - raise Exception( - f"Check commit readiness failed: {readiness_result}" - ) - - # Check approved status - approvals = readiness_result.get("approvals", {}) - approved_msps = [ - org_msp for org_msp, approved in approvals.items() if approved - ] - if not approved_msps: - raise Exception("No organizations have approved this chaincode") - - LOG.info(f"Approved organizations: {approved_msps}") - - try: - channel = Channel.objects.get(name=channel_name) - channel_orgs = channel.organizations.all() - except Channel.DoesNotExist: - raise Exception(f"Channel {channel_name} not found") - - # find the corresponding organization by MSP ID - # MSP ID format: Org1MSP, Org2MSP -> organization name format: org1.xxx, org2.xxx - approved_orgs = [] - for msp_id in approved_msps: - if msp_id.endswith("MSP"): - org_prefix = msp_id[ - :-3 - ].lower() # remove "MSP" and convert to lowercase - # find the corresponding organization in the channel - for channel_org in channel_orgs: - if channel_org.name.split(".")[0] == org_prefix: - approved_orgs.append(channel_org) - LOG.info( - f"Found approved organization: {channel_org.name} (MSP: {msp_id})" - ) - break - - if not approved_orgs: - raise Exception("No approved organizations found in this channel") - return approved_orgs - - def _get_peer_addresses_and_certs_by_organizations(self, orgs): - addresses = [] - certs = [] - for org 
in orgs: - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - LOG.warning( - f"No peer nodes found for organization: {org.name}" - ) - continue - - # select the first peer node for each organization - peer = qs.first() - peer_tls_cert = "{}/{}/crypto-config/peerOrganizations/{}/peers/{}/tls/ca.crt".format( - CELLO_HOME, org.name, org.name, peer.name + "." + org.name - ) - peer_address = peer.name + "." + org.name + ":" + str(7051) - LOG.info(f"Added peer from org {org.name}: {peer_address}") - - addresses.append(peer_address) - certs.append(peer_tls_cert) - - if not addresses: - raise Exception("No peer nodes found for specified organizations") - return addresses, certs - - @swagger_auto_schema( - method="post", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["post"]) - def commit(self, request): - serializer = ChainCodeCommitBody(data=request.data) - if serializer.is_valid(raise_exception=True): - try: - channel_name = serializer.validated_data.get("channel_name") - chaincode_name = serializer.validated_data.get( - "chaincode_name" - ) - chaincode_version = serializer.validated_data.get( - "chaincode_version" - ) - policy = serializer.validated_data.get("policy") - sequence = serializer.validated_data.get("sequence") - init_flag = serializer.validated_data.get("init_flag", False) - org = request.user.organization - - orderer_url = self._get_orderer_url(org) - - # Step 1: Check commit readiness, find all approved organizations - peer_channel_cli = self._get_peer_channel_cli(org) - approved_organizations = ( - self._get_approved_organizations_by_channel_and_chaincode( - peer_channel_cli, - channel_name, - chaincode_name, - chaincode_version, - sequence, - ) - ) - - # Step 2: Get peer nodes and root certs - peer_address_list, peer_root_certs = ( - self._get_peer_addresses_and_certs_by_organizations( - approved_organizations - ) - ) - - # Step 3: Commit 
chaincode - code = peer_channel_cli.lifecycle_commit( - orderer_url, - channel_name, - chaincode_name, - chaincode_version, - sequence, - policy, - peer_address_list, - peer_root_certs, - init_flag, - ) - if code != 0: - return Response( - err("Commit chaincode failed"), - status=status.HTTP_400_BAD_REQUEST, - ) - - LOG.info(f"Chaincode {chaincode_name} committed successfully") - - # Step 4: Query committed chaincode - code, committed_result = ( - peer_channel_cli.lifecycle_query_committed( - channel_name, chaincode_name - ) - ) - if code == 0: - LOG.info(committed_result) - return Response( - ok(committed_result), status=status.HTTP_200_OK - ) - else: - return Response( - err("Query committed failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - - except Exception as e: - LOG.error(f"Commit chaincode failed: {str(e)}") - return Response( - err(f"Commit chaincode failed: {str(e)}"), - status=status.HTTP_400_BAD_REQUEST, - ) - - @swagger_auto_schema( - method="get", - responses=with_common_response( - {status.HTTP_201_CREATED: ChainCodeIDSerializer} - ), - ) - @action(detail=False, methods=["get"]) - def query_committed(self, request): - try: - channel_name = request.data.get("channel_name") - chaincode_name = request.data.get("chaincode_name") - org = request.user.organization - qs = Node.objects.filter(type="peer", organization=org) - if not qs.exists(): - raise ResourceNotFound("Peer Does Not Exist") - peer_node = qs.first() - envs = init_env_vars(peer_node, org) - peer_channel_cli = PeerChainCode(**envs) - code, chaincodes_commited = ( - peer_channel_cli.lifecycle_query_committed( - channel_name, chaincode_name - ) - ) - if code != 0: - return Response( - err("query committed failed."), - status=status.HTTP_400_BAD_REQUEST, - ) - except Exception as e: - LOG.exception("Could Not Commit Query") - return Response(err(e.args), status=status.HTTP_400_BAD_REQUEST) - return Response(ok(chaincodes_commited), status=status.HTTP_200_OK) diff --git 
a/src/api-engine/api/utils/__init__.py b/src/api-engine/api/utils/__init__.py index 1f4b2081e..5969b0c4c 100644 --- a/src/api-engine/api/utils/__init__.py +++ b/src/api-engine/api/utils/__init__.py @@ -8,8 +8,7 @@ from api.common.enums import ErrorCode from rest_framework import status from rest_framework.exceptions import ErrorDetail -from .common import zip_dir, zip_file -from api.common import ok, err +from api.common import err LOG = logging.getLogger(__name__) diff --git a/src/api-engine/api/utils/common.py b/src/api-engine/api/utils/common.py index 5f93d9310..ade3247de 100644 --- a/src/api-engine/api/utils/common.py +++ b/src/api-engine/api/utils/common.py @@ -19,13 +19,6 @@ LOG = logging.getLogger(__name__) -def make_uuid(): - return str(uuid.uuid4()) - - -def random_name(prefix=""): - return "%s-%s" % (prefix, uuid.uuid4().hex) - def with_common_response(responses=None): if responses is None: @@ -52,270 +45,3 @@ def with_common_response(responses=None): (serializers.FileField, openapi.TYPE_FILE), (serializers.ImageField, openapi.TYPE_FILE), ] - - -def to_form_paras(self): - custom_paras = [] - for field_name, field in self.fields.items(): - type_str = openapi.TYPE_STRING - for field_class, type_format in basic_type_info: - if isinstance(field, field_class): - type_str = type_format - help_text = getattr(field, "help_text") - default = getattr(field, "default", None) - required = getattr(field, "required") - if callable(default): - custom_paras.append( - openapi.Parameter( - field_name, - openapi.IN_FORM, - help_text, - type=type_str, - required=required, - ) - ) - else: - custom_paras.append( - openapi.Parameter( - field_name, - openapi.IN_FORM, - help_text, - type=type_str, - required=required, - default=default, - ) - ) - return custom_paras - - -def hash_file(file, block_size=65536): - hash_func = hashlib.md5() - for buf in iter(partial(file.read, block_size), b""): - hash_func.update(buf) - - return hash_func.hexdigest() - - -def zip_dir(dirpath, 
outFullName): - """ - Compress the specified folder - :param dirpath: specified folder - :param outFullName: Save path+xxxx.zip - :return: null - """ - dir_dst = "/" + dirpath.rsplit("/", 1)[1] - zdir = ZipFile(outFullName, "w") - for path, dirnames, filenames in os.walk(dirpath): - fpath = dir_dst + path.replace(dirpath, "") - for filename in filenames: - zdir.write( - os.path.join(path, filename), os.path.join(fpath, filename) - ) - # zip empty folder - for dirname in dirnames: - zdir.write( - os.path.join(path, dirname), os.path.join(fpath, dirname) - ) - zdir.close() - - -def zip_file(dirpath, outFullName): - """ - Compress the specified file - :param dirpath: specified folder of file - :param outFullName: Save path+filename.zip - :return: null - """ - zfile = ZipFile(outFullName, "w") - zfile.write(dirpath, dirpath.rsplit("/", 1)[1]) - zfile.close() - - -def parse_block_file(data): - """ - Parse org config from channel config block. - - :param data: channel config block in json format. 
- :param org_name: the organization prefix name - :return organization config - """ - config = loads(data) - if config.get("data"): - return ( - config.get("data") - .get("data")[0] - .get("payload") - .get("data") - .get("config") - ) - return {"error": "can't find channel config"} - - -def to_dict(data): - return loads(data) - - -def json_filter(input, output, expression): - """ - Process JSON data using path expression similar to jq - - Args: - input (str): JSON data or file path to JSON - output (str): Path expression like ".data.data[0].payload.data.config" - - Returns: - dict: Processed JSON data - """ - # if json_data is a file path, read the file - if isinstance(input, str): - with open(input, "r", encoding="utf-8") as f: - data = json.load(f) - else: - data = input - - # parse the path expression - path_parts = expression.strip(".").split(".") - result = data - - for part in path_parts: - # handle array index, like data[0] - if "[" in part and "]" in part: - array_name = part.split("[")[0] - index = int(part.split("[")[1].split("]")[0]) - result = result[array_name][index] - else: - result = result[part] - - with open(output, "w", encoding="utf-8") as f: - json.dump(result, f, sort_keys=False, indent=4) - - LOG.info("jq {} {} -> {}".format(expression, input, output)) - - -def json_add_anchor_peer(input, output, anchor_peer_config, org_msp): - """ - Add anchor peer to the organization - - Args: - input (str): JSON data or file path to JSON - output (str): Path expression like ".data.data[0].payload.data.config" - expression (str): Anchor peer data - """ - # if json_data is a file path, read the file - if isinstance(input, str): - with open(input, "r", encoding="utf-8") as f: - data = json.load(f) - else: - data = input - - if "groups" not in data["channel_group"]: - data["channel_group"]["groups"] = {} - if "Application" not in data["channel_group"]["groups"]: - data["channel_group"]["groups"]["Application"] = {"groups": {}} - if org_msp not in 
data["channel_group"]["groups"]["Application"]["groups"]: - data["channel_group"]["groups"]["Application"]["groups"][org_msp] = { - "values": {} - } - - data["channel_group"]["groups"]["Application"]["groups"][org_msp][ - "values" - ].update(anchor_peer_config) - - with open(output, "w", encoding="utf-8") as f: - json.dump(data, f, sort_keys=False, indent=4) - - LOG.info( - "jq '.channel_group.groups.Application.groups.Org1MSP.values += ... ' {} -> {}".format( - input, output - ) - ) - - -def json_create_envelope(input, output, channel): - """ - Create a config update envelope structure - - Args: - input (str): Path to the config update JSON file - output (str): Path to save the envelope JSON - channel (str): Name of the channel - """ - try: - # Read the config update file - with open(input, "r", encoding="utf-8") as f: - config_update = json.load(f) - - # Create the envelope structure - envelope = { - "payload": { - "header": { - "channel_header": {"channel_id": channel, "type": 2} - }, - "data": {"config_update": config_update}, - } - } - - # Write the envelope to output file - with open(output, "w", encoding="utf-8") as f: - json.dump(envelope, f, sort_keys=False, indent=4) - - LOG.info("echo 'payload ... ' | jq . > {}".format(output)) - - except Exception as e: - LOG.error("Failed to create config update envelope: {}".format(str(e))) - raise - - -def init_env_vars(node, org): - """ - Initialize environment variables for peer channel CLI. - :param node: Node object - :param org: Organization object. 
- :return env: dict - """ - org_name = org.name - org_domain = org_name.split(".", 1)[1] - dir_certificate = "{}/{}/crypto-config/ordererOrganizations/{}".format( - CELLO_HOME, org_name, org_domain - ) - dir_node = "{}/{}/crypto-config/peerOrganizations".format( - CELLO_HOME, org_name - ) - - envs = {} - - if node.type == "orderer": - envs = { - "CORE_PEER_TLS_ENABLED": "true", - "ORDERER_CA": "{}/orderers/{}/msp/tlscacerts/tlsca.{}-cert.pem".format( - dir_certificate, node.name + "." + org_domain, org_domain - ), - "ORDERER_ADMIN_TLS_SIGN_CERT": "{}/orderers/{}/tls/server.crt".format( - dir_certificate, node.name + "." + org_domain - ), - "ORDERER_ADMIN_TLS_PRIVATE_KEY": "{}/orderers/{}/tls/server.key".format( - dir_certificate, node.name + "." + org_domain - ), - } - - elif node.type == "peer": - envs = { - "CORE_PEER_TLS_ENABLED": "true", - "CORE_PEER_LOCALMSPID": "{}MSP".format( - org_name.split(".")[0].capitalize() - ), - "CORE_PEER_TLS_ROOTCERT_FILE": "{}/{}/peers/{}/tls/ca.crt".format( - dir_node, org_name, node.name + "." + org_name - ), - "CORE_PEER_MSPCONFIGPATH": "{}/{}/users/Admin@{}/msp".format( - dir_node, org_name, org_name - ), - "CORE_PEER_ADDRESS": "{}:{}".format( - node.name + "." + org_name, str(7051) - ), - "FABRIC_CFG_PATH": "{}/{}/peers/{}/".format( - dir_node, org_name, node.name + "." 
+ org_name - ), - } - - return envs diff --git a/src/api-engine/common/serializers.py b/src/api-engine/common/serializers.py index 2ff60c827..41ebd2896 100644 --- a/src/api-engine/common/serializers.py +++ b/src/api-engine/common/serializers.py @@ -15,6 +15,7 @@ def get_paginator(self, q: QuerySet) -> Paginator: self.is_valid(raise_exception=True) return Paginator(q, self.data['per_page']) + class ListResponseSerializer(serializers.Serializer): total = serializers.IntegerField( help_text="Total number of data", min_value=0 From 6b45ce1f0c5fe3cd85188138d85544392f849301 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 08:55:48 +0800 Subject: [PATCH 08/67] Remove redundant old API files Signed-off-by: dodo920306 --- src/api-engine/api/admin.py | 6 - src/api-engine/api/lib/agent/__init__.py | 3 - src/api-engine/api/lib/agent/base.py | 33 --- .../api/lib/agent/docker/__init__.py | 4 - .../api/lib/agent/docker/handler.py | 157 ---------- src/api-engine/api/lib/agent/network_base.py | 15 - .../api/lib/configtxgen/__init__.py | 5 - .../api/lib/configtxgen/configtx.py | 273 ------------------ .../api/lib/configtxgen/configtxgen.py | 77 ----- .../api/lib/configtxlator/__init__.py | 0 .../api/lib/configtxlator/configtxlator.py | 101 ------- src/api-engine/api/utils/node_config.py | 146 ---------- 12 files changed, 820 deletions(-) delete mode 100644 src/api-engine/api/admin.py delete mode 100644 src/api-engine/api/lib/agent/__init__.py delete mode 100644 src/api-engine/api/lib/agent/base.py delete mode 100644 src/api-engine/api/lib/agent/docker/__init__.py delete mode 100644 src/api-engine/api/lib/agent/docker/handler.py delete mode 100644 src/api-engine/api/lib/agent/network_base.py delete mode 100644 src/api-engine/api/lib/configtxgen/__init__.py delete mode 100644 src/api-engine/api/lib/configtxgen/configtx.py delete mode 100644 src/api-engine/api/lib/configtxgen/configtxgen.py delete mode 100644 src/api-engine/api/lib/configtxlator/__init__.py delete mode 
100644 src/api-engine/api/lib/configtxlator/configtxlator.py delete mode 100644 src/api-engine/api/utils/node_config.py diff --git a/src/api-engine/api/admin.py b/src/api-engine/api/admin.py deleted file mode 100644 index 51fb5d189..000000000 --- a/src/api-engine/api/admin.py +++ /dev/null @@ -1,6 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from django.contrib import admin - -# Register your models here. diff --git a/src/api-engine/api/lib/agent/__init__.py b/src/api-engine/api/lib/agent/__init__.py deleted file mode 100644 index f21c6354a..000000000 --- a/src/api-engine/api/lib/agent/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# \ No newline at end of file diff --git a/src/api-engine/api/lib/agent/base.py b/src/api-engine/api/lib/agent/base.py deleted file mode 100644 index 9ac0ccf84..000000000 --- a/src/api-engine/api/lib/agent/base.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import abc - - -class AgentBase(object): - __metaclass__ = abc.ABCMeta - - @abc.abstractmethod - def create(self, *args, **kwargs): - """ - Create a new cluster - Args: - *args: args - **kwargs: keyword args - - Returns: - - """ - pass - - @abc.abstractmethod - def delete(self, *args, **kwargs): - pass - - @abc.abstractmethod - def start(self, *args, **kwargs): - pass - - @abc.abstractmethod - def stop(self, *args, **kwargs): - pass diff --git a/src/api-engine/api/lib/agent/docker/__init__.py b/src/api-engine/api/lib/agent/docker/__init__.py deleted file mode 100644 index 0ad59ed4c..000000000 --- a/src/api-engine/api/lib/agent/docker/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from api.lib.agent.docker.handler import DockerAgent diff --git a/src/api-engine/api/lib/agent/docker/handler.py b/src/api-engine/api/lib/agent/docker/handler.py deleted file mode 100644 index 58f6342e8..000000000 --- a/src/api-engine/api/lib/agent/docker/handler.py +++ /dev/null @@ -1,157 
+0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import logging -from requests import get, post -import json - -from api.lib.agent.base import AgentBase - -LOG = logging.getLogger(__name__) - - -class DockerAgent(AgentBase): - """Class represents docker agent.""" - - def __init__(self, node=None): - """init DockerAgent - param: - node:Information needed to create, start, and delete nodes, such as organizations, nodes, and so on - return:null - """ - if node is None: - node = {} - self._id = node.get("id") - self._name = node.get("name") - self._urls = node.get("urls") - self._cname = node.get("container_name") - - def create(self, info): - """ - Create node - :param node: Information needed to create nodes - :return: container ID - :rtype: string - """ - try: - port_map = { - str(port.internal): str(port.external) - for port in info.get("ports") - } - - data = { - "msp": info.get("msp")[2:-1], - "tls": info.get("tls")[2:-1], - "peer_config_file": info.get("config_file")[2:-1], - "orderer_config_file": info.get("config_file")[2:-1], - "img": "hyperledger/fabric:2.5.13", - "cmd": ( - 'bash /tmp/init.sh "peer node start"' - if info.get("type") == "peer" - else 'bash /tmp/init.sh "orderer"' - ), - "name": info.get("name"), - "type": info.get("type"), - "port_map": port_map.__repr__(), - "action": "create", - } - - response = post("{}/api/v1/nodes".format(self._urls), data=data) - - if response.status_code == 200: - txt = json.loads(response.text) - return txt["data"]["id"] - else: - return None - except Exception as e: - LOG.exception("DockerAgent Not Created") - raise e - - def delete(self, *args, **kwargs): - try: - response = post( - "{}/api/v1/nodes/{}".format(self._urls, self._cname), - data={"action": "delete"}, - ) - if response.status_code == 200: - return True - else: - raise response.reason - except Exception as e: - LOG.exception("DockerAgent Not Deleted") - raise e - - def start(self, *args, **kwargs): - try: - response = post( - 
"{}/api/v1/nodes/{}".format(self._urls, self._cname), - data={"action": "start"}, - ) - if response.status_code == 200: - return True - else: - raise response.reason - except Exception as e: - LOG.exception("DockerAgent Not Started") - raise e - - def restart(self, *args, **kwargs): - try: - response = post( - "{}/api/v1/nodes/{}".format(self._urls, self._cname), - data={"action": "restart"}, - ) - if response.status_code == 200: - return True - else: - raise response.reason - except Exception as e: - LOG.exception("DockerAgent Not Restarted") - raise e - - def stop(self, *args, **kwargs): - try: - response = post( - "{}/api/v1/nodes/{}".format(self._urls, self._cname), - data={"action": "stop"}, - ) - if response.status_code == 200: - return True - else: - raise response.reason - except Exception as e: - LOG.exception("DockerAgent Not Stopped") - raise e - - def get(self, *args, **kwargs): - try: - response = get( - "{}/api/v1/nodes/{}".format(self._urls, self._cname) - ) - if response.status_code == 200: - return True - else: - raise response.reason - except Exception as e: - LOG.exception("DockerAgent Not Found") - raise e - - def update_config(self, config_file, node_type): - try: - cmd = 'bash /tmp/update.sh "{} node start"'.format(node_type) - data = { - "peer_config_file": config_file, - "orderer_config_file": config_file, - "action": "update", - "cmd": cmd, - } - response = post( - "{}/api/v1/nodes/{}".format(self._urls, self._cname), data=data - ) - if response.status_code == 200: - return True - else: - raise response.reason - except Exception as e: - LOG.exception("Config Update Failed") - raise e diff --git a/src/api-engine/api/lib/agent/network_base.py b/src/api-engine/api/lib/agent/network_base.py deleted file mode 100644 index 4a03fed8e..000000000 --- a/src/api-engine/api/lib/agent/network_base.py +++ /dev/null @@ -1,15 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import abc - - -class NetworkBase(object): - __metaclass__ = abc.ABCMeta - - 
def __init__(self, *args, **kwargs): - pass - - @abc.abstractmethod - def generate_config(self, *args, **kwargs): - pass diff --git a/src/api-engine/api/lib/configtxgen/__init__.py b/src/api-engine/api/lib/configtxgen/__init__.py deleted file mode 100644 index 14f2016a7..000000000 --- a/src/api-engine/api/lib/configtxgen/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from .configtx import ConfigTX -from .configtxgen import ConfigTxGen diff --git a/src/api-engine/api/lib/configtxgen/configtx.py b/src/api-engine/api/lib/configtxgen/configtx.py deleted file mode 100644 index 7eb0ae8a1..000000000 --- a/src/api-engine/api/lib/configtxgen/configtx.py +++ /dev/null @@ -1,273 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import yaml -import os -from copy import deepcopy - -from api_engine.settings import CELLO_HOME - - -def load_configtx(filepath): - with open(filepath, "r", encoding="utf-8") as f: - return yaml.load(f, Loader=yaml.FullLoader) - - -class ConfigTX: - """Class represents crypto-config yaml.""" - - def __init__( - self, - network, - filepath=CELLO_HOME, - orderer=None, - raft_option=None, - template_path="/opt/config/configtx.yaml", - ): - """init ConfigTX - param: - network: network's name - orderer: configuration of output block - raft_option: configuration of raft - filepath: cello's working directory - return: - """ - self.filepath = filepath - self.network = network - self.template = load_configtx(template_path) - - def create( - self, - name, - consensus, - orderers, - peers, - orderer_cfg=None, - application=None, - option=None, - ): - """create the configtx.yaml - param: - consensus:consensus - orderers:the list of orderer - peers: the list of peer - orderer_cfg: the config of orderer - application: application - option: option - return: - """ - OrdererDefaults = self.template["Orderer"] - ChannelDefaults = self.template["Channel"] - ApplicationDefaults = self.template["Application"] - 
ChannelCapabilities = self.template["Capabilities"]["Channel"] - OrdererCapabilities = self.template["Capabilities"]["Orderer"] - ApplicationCapabilities = self.template["Capabilities"]["Application"] - - OrdererOrganizations = [] - OrdererAddress = [] - Consenters = [] - - for orderer in orderers: - OrdererMSP = "OrdererMSP" - OrdererOrg = dict( - Name="Orderer", - ID=OrdererMSP, - MSPDir="{}/{}/crypto-config/ordererOrganizations/{}/msp".format( - self.filepath, - orderer["name"], - orderer["name"].split(".", 1)[1], - ), - Policies=dict( - Readers=dict( - Type="Signature", - Rule="OR('{}.member')".format(OrdererMSP), - ), - Writers=dict( - Type="Signature", - Rule="OR('{}.member')".format(OrdererMSP), - ), - Admins=dict( - Type="Signature", - Rule="OR('{}.admin')".format(OrdererMSP), - ), - ), - ) - for host in orderer["hosts"]: - OrdererAddress.append( - "{}.{}:{}".format( - host["name"], orderer["name"].split(".", 1)[1], 7050 - ) - ) - Consenters.append( - dict( - Host="{}.{}".format( - host["name"], orderer["name"].split(".", 1)[1] - ), - Port=7050, - ClientTLSCert="{}/{}/crypto-config/ordererOrganizations/{}/orderers/{}.{}/tls/server.crt".format( - self.filepath, - orderer["name"], - orderer["name"].split(".", 1)[1], - host["name"], - orderer["name"].split(".", 1)[1], - ), - ServerTLSCert="{}/{}/crypto-config/ordererOrganizations/{}/orderers/{}.{}/tls/server.crt".format( - self.filepath, - orderer["name"], - orderer["name"].split(".", 1)[1], - host["name"], - orderer["name"].split(".", 1)[1], - ), - ) - ) - OrdererOrg["OrdererEndpoints"] = deepcopy(OrdererAddress) - OrdererOrganizations.append(OrdererOrg) - - PeerOrganizations = [] - - for peer in peers: - PeerMSP = peer["name"].split(".", 1)[0].capitalize() + "MSP" - PeerOrganizations.append( - dict( - Name=peer["name"].split(".", 1)[0].capitalize(), - ID=PeerMSP, - MSPDir="{}/{}/crypto-config/peerOrganizations/{}/msp".format( - self.filepath, peer["name"], peer["name"] - ), - Policies=dict( - Readers=dict( - 
Type="Signature", - Rule="OR('{}.admin', '{}.peer', '{}.client')".format( - PeerMSP, PeerMSP, PeerMSP - ), - ), - Writers=dict( - Type="Signature", - Rule="OR('{}.admin', '{}.client')".format( - PeerMSP, PeerMSP - ), - ), - Admins=dict( - Type="Signature", - Rule="OR('{}.admin')".format(PeerMSP), - ), - Endorsement=dict( - Type="Signature", - Rule="OR('{}.peer')".format(PeerMSP), - ), - ), - ) - ) - Organizations = OrdererOrganizations + PeerOrganizations - Capabilities = dict( - Channel=ChannelCapabilities, - Orderer=OrdererCapabilities, - Application=ApplicationCapabilities, - ) - Application = deepcopy(ApplicationDefaults) - Orderer = deepcopy(OrdererDefaults) - Orderer["Addresses"] = deepcopy(OrdererAddress) - Channel = deepcopy(ChannelDefaults) - Application["Capabilities"] = Capabilities["Application"] - Channel["Capabilities"] = Capabilities["Channel"] - Orderer["Capabilities"] = Capabilities["Orderer"] - Orderer["OrdererType"] = consensus - Orderer["EtcdRaft"]["Consenters"] = deepcopy(Consenters) - - Profiles = {} - Profiles[name] = deepcopy(Channel) - Profiles[name]["Orderer"] = deepcopy(Orderer) - Profiles[name]["Application"] = deepcopy(Application) - Profiles[name]["Capabilities"] = Capabilities["Channel"] - Profiles[name]["Orderer"]["Capabilities"] = Capabilities["Orderer"] - Profiles[name]["Application"]["Capabilities"] = Capabilities[ - "Application" - ] - Profiles[name]["Orderer"]["Organizations"] = OrdererOrganizations - Profiles[name]["Application"]["Organizations"] = PeerOrganizations - - configtx = dict( - Organizations=Organizations, - Capabilities=Capabilities, - Application=Application, - Orderer=Orderer, - Channel=Channel, - Profiles=Profiles, - ) - os.system("mkdir -p {}/{}".format(self.filepath, self.network)) - - with open( - "{}/{}/configtx.yaml".format(self.filepath, self.network), - "w", - encoding="utf-8", - ) as f: - yaml.dump(configtx, f, sort_keys=False) - - def createChannel(self, name, organizations): - """create the channel.tx - 
param: - name: name of channel - organizations: Organizations ready to join the channel - return: - """ - try: - with open( - "{}/{}/{}".format( - self.filepath, self.network, "configtx.yaml" - ), - "r+", - encoding="utf-8", - ) as f: - configtx = yaml.load(f, Loader=yaml.FullLoader) - Profiles = configtx["Profiles"] - Channel = configtx["Channel"] - Orderer = configtx["Orderer"] - Application = configtx["Application"] - PeerOrganizations = [] - for org in configtx["Organizations"]: - for item in organizations: - if org["ID"] == item.capitalize() + "MSP": - PeerOrganizations.append(org) - if PeerOrganizations == []: - raise Exception("can't find organnization") - Profiles[name] = deepcopy(Channel) - Profiles[name]["Orderer"] = Orderer - Profiles[name]["Application"] = Application - - with open( - "{}/{}/{}".format( - self.filepath, self.network, "configtx.yaml" - ), - "w", - encoding="utf-8", - ) as f: - yaml.safe_dump(configtx, f, sort_keys=False) - - except Exception as e: - err_msg = "Configtx create channel failed for {}!".format(e) - raise Exception(err_msg) - - -if __name__ == "__main__": - orderers = [ - { - "name": "org1.cello.com", - "hosts": [{"name": "orderer1", "port": 8051}], - } - ] - # peers = [{"name": "org1.cello.com", "hosts": [{"name": "foo", "port": 7051},{"name": "car", "port": 7052}]}, - # {"name": "org2.cello.com", "hosts": [{"name": "zoo", "port": 7053}]}] - peers = [ - { - "name": "org1.cello.com", - "hosts": [ - {"name": "foo", "port": 7051}, - {"name": "car", "port": 7052}, - ], - } - ] - ConfigTX("test3").create( - consensus="etcdraft", orderers=orderers, peers=peers - ) - # tx = ConfigTX("test3") - # print(tx.template) diff --git a/src/api-engine/api/lib/configtxgen/configtxgen.py b/src/api-engine/api/lib/configtxgen/configtxgen.py deleted file mode 100644 index 6574fbe44..000000000 --- a/src/api-engine/api/lib/configtxgen/configtxgen.py +++ /dev/null @@ -1,77 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# - -import subprocess 
-import logging - -from api_engine.settings import CELLO_HOME, FABRIC_TOOL, FABRIC_VERSION - -LOG = logging.getLogger(__name__) - - -class ConfigTxGen: - """Class represents cryptotxgen.""" - - def __init__( - self, - network, - filepath=CELLO_HOME, - configtxgen=FABRIC_TOOL, - version=FABRIC_VERSION, - ): - """init CryptoGen - param: - network: network's name - configtxgen: tool path - version: version - filepath: cello's working directory - return: - """ - self.network = network - self.configtxgen = configtxgen + "/configtxgen" - self.filepath = filepath - self.version = version - - def genesis(self, profile="", channelid="", outputblock="genesis.block"): - """generate gensis - param: - profile: profile - channelid: channelid - outputblock: outputblock - return: - """ - try: - command = [ - self.configtxgen, - "-configPath", - "{}/{}/".format(self.filepath, self.network), - "-profile", - "{}".format(profile), - "-outputBlock", - "{}/{}/{}".format(self.filepath, self.network, outputblock), - "-channelID", - "{}".format(channelid), - ] - - LOG.info(" ".join(command)) - - subprocess.run(command, check=True) - - except subprocess.CalledProcessError as e: - err_msg = "configtxgen genesis fail! " - raise Exception(err_msg + str(e)) - - except Exception as e: - err_msg = "configtxgen genesis fail! 
" - raise Exception(err_msg + str(e)) - - def anchorpeer(self, profile, channelid, outputblock): - """set anchorpeer - param: - profile: profile - channelid: channelid - outputblock: outputblock - return: - """ - pass diff --git a/src/api-engine/api/lib/configtxlator/__init__.py b/src/api-engine/api/lib/configtxlator/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/api-engine/api/lib/configtxlator/configtxlator.py b/src/api-engine/api/lib/configtxlator/configtxlator.py deleted file mode 100644 index 19817f057..000000000 --- a/src/api-engine/api/lib/configtxlator/configtxlator.py +++ /dev/null @@ -1,101 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from subprocess import call, run - - -import logging - -from api_engine.settings import FABRIC_TOOL, FABRIC_VERSION - -LOG = logging.getLogger(__name__) - - -class ConfigTxLator: - """ - Class represents configtxlator CLI. - """ - - def __init__(self, configtxlator=FABRIC_TOOL, version=FABRIC_VERSION): - self.configtxlator = configtxlator + "/configtxlator" - self.version = version - - def proto_encode(self, input, type, output): - """ - Converts a JSON document to protobuf. - - params: - input: A file containing the JSON document. - type: The type of protobuf structure to encode to. For example, 'common.Config'. - output: A file to write the output to. - """ - try: - command = [ - self.configtxlator, - "proto_encode", - "--input={}".format(input), - "--type={}".format(type), - "--output={}".format(output), - ] - - LOG.info(" ".join(command)) - - call(command) - except Exception as e: - err_msg = "configtxlator proto decode fail! " - raise Exception(err_msg + str(e)) - - def proto_decode(self, input, type, output): - """ - Converts a proto message to JSON. - - params: - input: A file containing the JSON document. - type: The type of protobuf structure to decode to. For example, 'common.Config'. 
- return: - config - """ - try: - command = [ - self.configtxlator, - "proto_decode", - "--type={}".format(type), - "--input={}".format(input), - "--output={}".format(output), - ] - - LOG.info(" ".join(command)) - - call(command) - - except Exception as e: - err_msg = "configtxlator proto decode fail! " - raise Exception(err_msg + str(e)) - - def compute_update(self, original, updated, channel_id, output): - """ - Takes two marshaled common.Config messages and computes the config update which - transitions between the two. - - params: - original: The original config message. - updated: The updated config message. - channel_id: The name of the channel for this update. - output: A file to write the JSON document to. - """ - try: - command = [ - self.configtxlator, - "compute_update", - "--original={}".format(original), - "--updated={}".format(updated), - "--channel_id={}".format(channel_id), - "--output={}".format(output), - ] - - LOG.info(" ".join(command)) - - call(command) - except Exception as e: - err_msg = "configtxlator compute update fail! 
" - raise Exception(err_msg + str(e)) diff --git a/src/api-engine/api/utils/node_config.py b/src/api-engine/api/utils/node_config.py deleted file mode 100644 index 100295e4c..000000000 --- a/src/api-engine/api/utils/node_config.py +++ /dev/null @@ -1,146 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from string import Template -import os -import yaml -from api.config import CELLO_HOME - - -class NodeConfig: - """Class represents crypto-config yaml.""" - - def __init__( - self, - org, - peer_file="core.yaml", - orderer_file="orderer.yaml", - ca_file="", - ): - """ - init node config - - :param org: organization name - :param peer: peer profile template - :param ca: ca profile template - :param orderer: orderer profile template - :return: none - :rtype: xxx - """ - self.org = org - self.peer_file = peer_file - self.orderer_file = orderer_file - self.ca_file = ca_file - - @staticmethod - def _render(src, dst, **kw): - """ - Generate configuration file based on parameters - - :param kw: Node configuration parameters,Use the underline interval key。 - e.g., - peer listenAddress, kwargs["peer_listenAddress"]="0.0.0.0:7051" - chaincode builder, kwargs["chaincode_builder"]="hyperledger/fabric-ccenv:1.4.2" - :param src: Node profile template - :param dst: Node profile - :return: none - :rtype: none - """ - try: - with open(src, "r+") as f: - cfg = yaml.load(f, Loader=yaml.FullLoader) - - for key, value in kw.items(): - keys = key.split("_") - # switch = {2: cfg[keys[0]][keys[1]], - # 3: cfg[keys[0]][keys[1]][keys[2]], - # 4: cfg[keys[0]][keys[1]][keys[2]][keys[3]], - # 5: cfg[keys[0]][keys[1]][keys[2]][keys[3]][keys[4]]} - - if len(keys) == 2: - cfg[keys[0]][keys[1]] = value - elif len(keys) == 3: - cfg[keys[0]][keys[1]][keys[2]] = value - elif len(keys) == 4: - cfg[keys[0]][keys[1]][keys[2]][keys[3]] = value - elif len(keys) == 5: - cfg[keys[0]][keys[1]][keys[2]][keys[3]][keys[4]] = value - - with open(dst, "w+") as f: - yaml.dump(cfg, f) - except Exception as e: - 
raise Exception("Configuration File Not Generated") from e - - def __from_dst(self, node, node_type): - """ - Location of the new profile - - :param node: node name - :param node_type: node type (peer, orderer, ca) - :return: dst - :rtype: string - """ - if node_type == "peer": - dst = "{}/{}/crypto-config/peerOrganizations/{}/peers/{}.{}/{}".format( - CELLO_HOME, self.org, self.org, node, self.org, self.peer_file - ) - elif node_type == "orderer": - dst = "{}/{}/crypto-config/ordererOrganizations/{}/orderers/{}.{}/{}".format( - CELLO_HOME, - self.org, - self.org.split(".", 1)[1], - node, - self.org.split(".", 1)[1], - self.orderer_file, - ) - else: - dst = "" - return dst - - def peer(self, node, **kwargs): - """ - Location of the node profile - - :param node: peer name - :param kwargs: Node configuration parameters,Use the underline interval key。 - e.g., - peer listenAddress, kwargs["peer_listenAddress"]="0.0.0.0:7051" - chaincode builder, kwargs["chaincode_builder"]="hyperledger/fabric-ccenv:1.4.2" - :return: none - :rtype: none - """ - src = "/opt/node/core.yaml.bak" - dst = self.__from_dst(node, "peer") - self._render(src, dst, **kwargs) - - def orderer(self, node, **kwargs): - """ - Location of the orderer profile - - :param node: orderer name - :param kwargs: Node configuration parameters,Use the underline interval key。 - e.g., - peer listenAddress, kwargs["peer_listenAddress"]="0.0.0.0:7051" - chaincode builder, kwargs["chaincode_builder"]="hyperledger/fabric-ccenv:1.4.2" - :return: none - :rtype: none - """ - src = "/opt/node/orderer.yaml.bak" - dst = self.__from_dst(node, "orderer") - self._render(src, dst, **kwargs) - - def ca(self, node, **kwargs): - """ - Location of the orderer profile - - :param node: ca name - :param kwargs: Node configuration parameters,Use the underline interval key。 - e.g., - peer listenAddress, kwargs["peer_listenAddress"]="0.0.0.0:7051" - chaincode builder, kwargs["chaincode_builder"]="hyperledger/fabric-ccenv:1.4.2" - :return: 
none - :rtype: none - """ - src = self.ca_file - dst = self.__from_dst(node, "ca") - self._render(src, dst, **kwargs) From c4fab83cfc937dc662623da115846433730b40cc Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 08:57:09 +0800 Subject: [PATCH 09/67] Make migrations Signed-off-by: dodo920306 --- .../chaincode/migrations/0001_initial.py | 41 +++++++++++++++ .../chaincode/migrations/0002_initial.py | 29 +++++++++++ .../channel/migrations/0001_initial.py | 30 +++++++++++ .../node/migrations/0001_initial.py | 34 +++++++++++++ .../organization/migrations/0001_initial.py | 29 +++++++++++ .../user/migrations/0001_initial.py | 50 +++++++++++++++++++ 6 files changed, 213 insertions(+) create mode 100644 src/api-engine/chaincode/migrations/0001_initial.py create mode 100644 src/api-engine/chaincode/migrations/0002_initial.py create mode 100644 src/api-engine/channel/migrations/0001_initial.py create mode 100644 src/api-engine/node/migrations/0001_initial.py create mode 100644 src/api-engine/organization/migrations/0001_initial.py create mode 100644 src/api-engine/user/migrations/0001_initial.py diff --git a/src/api-engine/chaincode/migrations/0001_initial.py b/src/api-engine/chaincode/migrations/0001_initial.py new file mode 100644 index 000000000..afafe5215 --- /dev/null +++ b/src/api-engine/chaincode/migrations/0001_initial.py @@ -0,0 +1,41 @@ +# Generated by Django 4.2.16 on 2025-09-28 23:48 + +import chaincode.models +import common.utils +import django.core.validators +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('channel', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='Chaincode', + fields=[ + ('id', models.UUIDField(default=common.utils.make_uuid, editable=False, help_text='Chaincode ID', primary_key=True, serialize=False, unique=True)), + ('package_id', models.CharField(editable=False, help_text='Chaincode 
Package ID', max_length=128, unique=True)), + ('package', models.FileField(help_text='Chaincode Package', upload_to=chaincode.models.get_package_path)), + ('name', models.CharField(help_text='Chaincode Name', max_length=128)), + ('version', models.CharField(help_text='Chaincode Version', max_length=128)), + ('sequence', models.IntegerField(help_text='Chaincode Sequence', validators=[django.core.validators.MinValueValidator(1)])), + ('label', models.CharField(help_text='Chaincode Label', max_length=128)), + ('language', models.CharField(help_text='Chaincode Language', max_length=128)), + ('init_required', models.BooleanField(default=False, help_text='Whether Chaincode Initialization Required')), + ('signature_policy', models.CharField(blank=True, help_text='Chaincode Signature Policy', null=True)), + ('status', models.CharField(choices=[('CREATED', 'Created'), ('INSTALLED', 'Installed'), ('APPROVED', 'Approved'), ('COMMITTED', 'Committed')], default='CREATED', help_text='Chaincode Status', max_length=16)), + ('description', models.CharField(blank=True, help_text='Chaincode Description', max_length=128, null=True)), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Chaincode Creation Timestamp')), + ('channel', models.ForeignKey(help_text='Chaincode Channel', on_delete=django.db.models.deletion.CASCADE, related_name='chaincodes', to='channel.channel')), + ], + options={ + 'ordering': ('-created_at',), + }, + ), + ] diff --git a/src/api-engine/chaincode/migrations/0002_initial.py b/src/api-engine/chaincode/migrations/0002_initial.py new file mode 100644 index 000000000..01eabb16f --- /dev/null +++ b/src/api-engine/chaincode/migrations/0002_initial.py @@ -0,0 +1,29 @@ +# Generated by Django 4.2.16 on 2025-09-28 23:48 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('chaincode', '0001_initial'), + 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('node', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='chaincode', + name='creator', + field=models.ForeignKey(help_text='Chaincode Creator', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='chaincode', + name='peers', + field=models.ManyToManyField(help_text='Chaincode Installed Peers', to='node.node'), + ), + ] diff --git a/src/api-engine/channel/migrations/0001_initial.py b/src/api-engine/channel/migrations/0001_initial.py new file mode 100644 index 000000000..3e8e5d5b0 --- /dev/null +++ b/src/api-engine/channel/migrations/0001_initial.py @@ -0,0 +1,30 @@ +# Generated by Django 4.2.16 on 2025-09-28 23:48 + +import common.utils +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('organization', '0001_initial'), + ('node', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='Channel', + fields=[ + ('id', models.UUIDField(default=common.utils.make_uuid, editable=False, help_text='Channel ID', primary_key=True, serialize=False, unique=True)), + ('name', models.CharField(help_text='Channel Name', max_length=128)), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Channel Creation Timestamp')), + ('orderers', models.ManyToManyField(help_text='Channel Orderers', to='node.node')), + ('organizations', models.ManyToManyField(help_text='Channel Organizations', related_name='channels', to='organization.organization')), + ], + options={ + 'ordering': ('-created_at',), + }, + ), + ] diff --git a/src/api-engine/node/migrations/0001_initial.py b/src/api-engine/node/migrations/0001_initial.py new file mode 100644 index 000000000..007ec3443 --- /dev/null +++ b/src/api-engine/node/migrations/0001_initial.py @@ -0,0 +1,34 @@ +# Generated by Django 4.2.16 on 2025-09-28 23:48 + +import common.utils 
+from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('organization', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='Node', + fields=[ + ('id', models.UUIDField(default=common.utils.make_uuid, help_text='Node ID', primary_key=True, serialize=False)), + ('name', models.CharField(help_text='Node Name', max_length=64)), + ('type', models.CharField(choices=[('PEER', 'Peer'), ('ORDERER', 'Orderer')], help_text='Node Type', max_length=64)), + ('created_at', models.DateTimeField(auto_now_add=True, help_text='Node Creation Timestamp')), + ('status', models.CharField(choices=[('CREATED', 'Created'), ('RUNNING', 'Running'), ('FAILED', 'Failed')], default='CREATED', help_text='Node Status', max_length=64)), + ('config_file', models.TextField(help_text='Node Config File', null=True)), + ('msp', models.TextField(help_text='Node MSP', null=True)), + ('tls', models.TextField(help_text='Node TLS', null=True)), + ('organization', models.ForeignKey(help_text='Organization Nodes', on_delete=django.db.models.deletion.CASCADE, related_name='nodes', to='organization.organization')), + ], + options={ + 'ordering': ('-created_at',), + }, + ), + ] diff --git a/src/api-engine/organization/migrations/0001_initial.py b/src/api-engine/organization/migrations/0001_initial.py new file mode 100644 index 000000000..fc8095a3f --- /dev/null +++ b/src/api-engine/organization/migrations/0001_initial.py @@ -0,0 +1,29 @@ +# Generated by Django 4.2.16 on 2025-09-28 23:48 + +import common.utils +import common.validators +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Organization', + fields=[ + ('id', models.UUIDField(default=common.utils.make_uuid, help_text='ID of organization', primary_key=True, serialize=False)), + ('name', 
models.CharField(help_text='Name of organization', max_length=64, unique=True, validators=[common.validators.validate_host])), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('msp', models.TextField(help_text='msp of organization', null=True)), + ('tls', models.TextField(help_text='tls of organization', null=True)), + ], + options={ + 'ordering': ('-created_at',), + }, + ), + ] diff --git a/src/api-engine/user/migrations/0001_initial.py b/src/api-engine/user/migrations/0001_initial.py new file mode 100644 index 000000000..c06ac0ba4 --- /dev/null +++ b/src/api-engine/user/migrations/0001_initial.py @@ -0,0 +1,50 @@ +# Generated by Django 4.2.16 on 2025-09-28 23:48 + +import common.utils +import django.contrib.auth.models +import django.contrib.auth.validators +from django.db import migrations, models +import django.db.models.deletion +import django.utils.timezone + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('auth', '0012_alter_user_first_name_max_length'), + ('organization', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='UserProfile', + fields=[ + ('password', models.CharField(max_length=128, verbose_name='password')), + ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), + ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')), + ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. 
Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')), + ('first_name', models.CharField(blank=True, max_length=150, verbose_name='first name')), + ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')), + ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')), + ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')), + ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')), + ('id', models.UUIDField(default=common.utils.make_uuid, help_text='User ID', primary_key=True, serialize=False)), + ('email', models.EmailField(db_index=True, max_length=254, unique=True)), + ('role', models.CharField(choices=[('ADMIN', 'Admin'), ('USER', 'User')], default='USER', help_text='User Role', max_length=64)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.group', verbose_name='groups')), + ('organization', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='organization.organization')), + ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.permission', verbose_name='user permissions')), + ], + options={ + 'verbose_name': 'User Info', + 'verbose_name_plural': 'User Info', + 'ordering': ['-date_joined'], + }, + managers=[ + ('objects', django.contrib.auth.models.UserManager()), + ], + ), + ] From 1577c0710cd69177686ddaf9a59b66ffdc0f0688 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 09:02:08 +0800 Subject: [PATCH 10/67] Move Docker file into api engine Signed-off-by: dodo920306 --- {build_image/docker/common => src}/api-engine/Dockerfile.in | 0 {build_image/docker/common => src}/api-engine/entrypoint.sh | 0 {build_image/docker/common => src}/api-engine/server.ini | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename {build_image/docker/common => src}/api-engine/Dockerfile.in (100%) rename {build_image/docker/common => src}/api-engine/entrypoint.sh (100%) rename {build_image/docker/common => src}/api-engine/server.ini (100%) diff --git a/build_image/docker/common/api-engine/Dockerfile.in b/src/api-engine/Dockerfile.in similarity index 100% rename from build_image/docker/common/api-engine/Dockerfile.in rename to src/api-engine/Dockerfile.in diff --git a/build_image/docker/common/api-engine/entrypoint.sh b/src/api-engine/entrypoint.sh similarity index 100% rename from build_image/docker/common/api-engine/entrypoint.sh rename to src/api-engine/entrypoint.sh diff --git a/build_image/docker/common/api-engine/server.ini b/src/api-engine/server.ini similarity index 100% rename from build_image/docker/common/api-engine/server.ini 
rename to src/api-engine/server.ini From 21e575f3142aae4a987ca85fcd1c69e65db31962 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 09:17:23 +0800 Subject: [PATCH 11/67] Simplify entrypoint Signed-off-by: dodo920306 --- .gitignore | 8 +- src/api-engine/Dockerfile.in | 13 +- src/api-engine/cello/node/core.yaml.bak | 800 +++++++++++++++++++++ src/api-engine/cello/node/orderer.yaml.bak | 427 +++++++++++ src/api-engine/entrypoint.sh | 36 +- 5 files changed, 1246 insertions(+), 38 deletions(-) create mode 100644 src/api-engine/cello/node/core.yaml.bak create mode 100644 src/api-engine/cello/node/orderer.yaml.bak diff --git a/.gitignore b/.gitignore index 1bbcff3c7..702012875 100644 --- a/.gitignore +++ b/.gitignore @@ -137,7 +137,6 @@ fabric.properties bin/ tmp/ *.tmp -*.bak *.swp *~.nib local.properties @@ -219,7 +218,6 @@ yarn-error.log /coverage .idea package-lock.json -*bak .vscode # visual studio code @@ -244,9 +242,9 @@ src/dashboard/lambda/mock/index.js .config .config.old -# Cello local storage -/**/opt/* -/**/cello/* +# Fabric +src/api-engine/cello/builders +src/api-engine/cello/config # Local Netlify folder .netlify diff --git a/src/api-engine/Dockerfile.in b/src/api-engine/Dockerfile.in index 8cfba44b9..76ab1d597 100644 --- a/src/api-engine/Dockerfile.in +++ b/src/api-engine/Dockerfile.in @@ -10,20 +10,15 @@ RUN apt-get update \ # Set the working dir WORKDIR /var/www/server -# Install compiled code tools from Artifactory and copy it to opt folder. -RUN curl -L --retry 5 --retry-delay 3 "https://github.com/hyperledger/fabric/releases/download/v2.5.13/hyperledger-fabric-linux-amd64-2.5.13.tar.gz" | tar xz -C /opt/ - # Copy source code to the working dir COPY src/api-engine ./ -COPY template/node /opt/node + +# Install compiled code tools from Artifactory and copy it to opt folder. 
+RUN curl -L --retry 5 --retry-delay 3 "https://github.com/hyperledger/fabric/releases/download/v2.5.13/hyperledger-fabric-linux-amd64-2.5.13.tar.gz" | tar xz -C ./cello/ # Install python dependencies RUN pip3 install -r requirements.txt -# Add uwsgi configuration file -COPY build_image/docker/common/api-engine/server.ini /etc/uwsgi/apps-enabled/ - ENV RUN_MODE=server -COPY build_image/docker/common/api-engine/entrypoint.sh / -CMD ["bash", "/entrypoint.sh"] \ No newline at end of file +CMD ["bash", "entrypoint.sh"] diff --git a/src/api-engine/cello/node/core.yaml.bak b/src/api-engine/cello/node/core.yaml.bak new file mode 100644 index 000000000..059379357 --- /dev/null +++ b/src/api-engine/cello/node/core.yaml.bak @@ -0,0 +1,800 @@ +# Copyright IBM Corp. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +# + +############################################################################### +# +# Peer section +# +############################################################################### +peer: + + # The peer id provides a name for this peer instance and is used when + # naming docker resources. + id: jdoe + + # The networkId allows for logical separation of networks and is used when + # naming docker resources. + networkId: dev + + # The Address at local network interface this Peer will listen on. + # By default, it will listen on all network interfaces + listenAddress: 0.0.0.0:7051 + + # The endpoint this peer uses to listen for inbound chaincode connections. + # If this is commented-out, the listen address is selected to be + # the peer's address (see below) with port 7052 + # chaincodeListenAddress: 0.0.0.0:7052 + + # The endpoint the chaincode for this peer uses to connect to the peer. + # If this is not specified, the chaincodeListenAddress address is selected. + # And if chaincodeListenAddress is not specified, address is selected from + # peer address (see below).
If specified peer address is invalid then it + # will fallback to the auto detected IP (local IP) regardless of the peer + # addressAutoDetect value. + # chaincodeAddress: 0.0.0.0:7052 + + # When used as peer config, this represents the endpoint to other peers + # in the same organization. For peers in other organization, see + # gossip.externalEndpoint for more info. + # When used as CLI config, this means the peer's endpoint to interact with + address: 0.0.0.0:7051 + + # Whether the Peer should programmatically determine its address + # This case is useful for docker containers. + # When set to true, will override peer address. + addressAutoDetect: false + + # Settings for the Peer's gateway server. + gateway: + # Whether the gateway is enabled for this Peer. + enabled: true + # endorsementTimeout is the duration the gateway waits for a response + # from other endorsing peers before returning a timeout error to the client. + endorsementTimeout: 30s + # broadcastTimeout is the duration the gateway waits for a response + # from ordering nodes before returning a timeout error to the client. + broadcastTimeout: 30s + # dialTimeout is the duration the gateway waits for a connection + # to other network nodes. + dialTimeout: 2m + + + # Keepalive settings for peer server and clients + keepalive: + # Interval is the duration after which if the server does not see + # any activity from the client it pings the client to see if it's alive + interval: 7200s + # Timeout is the duration the server waits for a response + # from the client after sending a ping before closing the connection + timeout: 20s + # MinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the peer server will + # disconnect them + minInterval: 60s + # Client keepalive settings for communicating with other peer nodes + client: + # Interval is the time between pings to peer nodes. 
This must + # greater than or equal to the minInterval specified by peer + # nodes + interval: 60s + # Timeout is the duration the client waits for a response from + # peer nodes before closing the connection + timeout: 20s + # DeliveryClient keepalive settings for communication with ordering + # nodes. + deliveryClient: + # Interval is the time between pings to ordering nodes. This must + # greater than or equal to the minInterval specified by ordering + # nodes. + interval: 60s + # Timeout is the duration the client waits for a response from + # ordering nodes before closing the connection + timeout: 20s + + + # Gossip related configuration + gossip: + # Bootstrap set to initialize gossip with. + # This is a list of other peers that this peer reaches out to at startup. + # Important: The endpoints here have to be endpoints of peers in the same + # organization, because the peer would refuse connecting to these endpoints + # unless they are in the same organization as the peer. + bootstrap: 127.0.0.1:7051 + + # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. + # Setting both to true would result in the termination of the peer + # since this is undefined state. If the peers are configured with + # useLeaderElection=false, make sure there is at least 1 peer in the + # organization that its orgLeader is set to true. + + # Defines whenever peer will initialize dynamic algorithm for + # "leader" selection, where leader is the peer to establish + # connection with ordering service and use delivery protocol + # to pull ledger blocks from ordering service. + useLeaderElection: false + # Statically defines peer to be an organization "leader", + # where this means that current peer will maintain connection + # with ordering service and disseminate block across peers in + # its own organization. Multiple peers or all peers in an organization + # may be configured as org leaders, so that they all pull + # blocks directly from ordering service. 
+ orgLeader: true + + # Interval for membershipTracker polling + membershipTrackerInterval: 5s + + # Overrides the endpoint that the peer publishes to peers + # in its organization. For peers in foreign organizations + # see 'externalEndpoint' + endpoint: + # Maximum count of blocks stored in memory + maxBlockCountToStore: 10 + # Max time between consecutive message pushes(unit: millisecond) + maxPropagationBurstLatency: 10ms + # Max number of messages stored until a push is triggered to remote peers + maxPropagationBurstSize: 10 + # Number of times a message is pushed to remote peers + propagateIterations: 1 + # Number of peers selected to push messages to + propagatePeerNum: 3 + # Determines frequency of pull phases(unit: second) + # Must be greater than digestWaitTime + responseWaitTime + pullInterval: 4s + # Number of peers to pull from + pullPeerNum: 3 + # Determines frequency of pulling state info messages from peers(unit: second) + requestStateInfoInterval: 4s + # Determines frequency of pushing state info messages to peers(unit: second) + publishStateInfoInterval: 4s + # Maximum time a stateInfo message is kept until expired + stateInfoRetentionInterval: + # Time from startup certificates are included in Alive messages(unit: second) + publishCertPeriod: 10s + # Should we skip verifying block messages or not (currently not in use) + skipBlockVerification: false + # Dial timeout(unit: second) + dialTimeout: 3s + # Connection timeout(unit: second) + connTimeout: 2s + # Buffer size of received messages + recvBuffSize: 20 + # Buffer size of sending messages + sendBuffSize: 200 + # Time to wait before pull engine processes incoming digests (unit: second) + # Should be slightly smaller than requestWaitTime + digestWaitTime: 1s + # Time to wait before pull engine removes incoming nonce (unit: milliseconds) + # Should be slightly bigger than digestWaitTime + requestWaitTime: 1500ms + # Time to wait before pull engine ends pull (unit: second) + responseWaitTime: 2s + 
# Alive check interval(unit: second) + aliveTimeInterval: 5s + # Alive expiration timeout(unit: second) + aliveExpirationTimeout: 25s + # Reconnect interval(unit: second) + reconnectInterval: 25s + # Max number of attempts to connect to a peer + maxConnectionAttempts: 120 + # Message expiration factor for alive messages + msgExpirationFactor: 20 + # This is an endpoint that is published to peers outside of the organization. + # If this isn't set, the peer will not be known to other organizations and will not be exposed via service discovery. + externalEndpoint: + # Leader election service configuration + election: + # Longest time peer waits for stable membership during leader election startup (unit: second) + startupGracePeriod: 15s + # Interval gossip membership samples to check its stability (unit: second) + membershipSampleInterval: 1s + # Time passes since last declaration message before peer decides to perform leader election (unit: second) + leaderAliveThreshold: 10s + # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) + leaderElectionDuration: 5s + + pvtData: + # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block + # would be attempted to be pulled from peers until the block would be committed without the private data + pullRetryThreshold: 60s + # As private data enters the transient store, it is associated with the peer's ledger's height at that time. + # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, + # and the private data residing inside the transient store that is guaranteed not to be purged. + # Private data is purged from the transient store when blocks with sequences that are multiples + # of transientstoreMaxBlockRetention are committed. 
+ transientstoreMaxBlockRetention: 1000 + # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer + # at private data push at endorsement time. + pushAckTimeout: 3s + # Block to live pulling margin, used as a buffer + # to prevent peer from trying to pull private data + # from peers that is soon to be purged in next N blocks. + # This helps a newly joined peer catch up to current + # blockchain height quicker. + btlPullMargin: 10 + # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to + # pull from the other peers the most recent missing blocks with a maximum batch size limitation. + # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a + # single iteration. + reconcileBatchSize: 10 + # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning + # of the next reconciliation iteration. + reconcileSleepInterval: 1m + # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. + reconciliationEnabled: true + # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid + # transaction's private data from other peers need to be skipped during the commit time and pulled + # only through reconciler. + skipPullingInvalidTransactionsDuringCommit: false + # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. + # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values + # for disseminating private data. + # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to + # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. 
+ implicitCollectionDisseminationPolicy: + # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully + # disseminate private data for its own implicit collection during endorsement. Default value is 0. + requiredPeerCount: 0 + # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to + # disseminate private data for its own implicit collection during endorsement. Default value is 1. + maxPeerCount: 1 + + # Gossip state transfer related configuration + state: + # indicates whenever state transfer is enabled or not + # default value is false, i.e. state transfer is active + # and takes care to sync up missing blocks allowing + # lagging peer to catch up to speed with rest network. + # Keep in mind that when peer.gossip.useLeaderElection is true + # and there are several peers in the organization, + # or peer.gossip.useLeaderElection is false alongside with + # peer.gossip.orgleader being false, the peer's ledger may lag behind + # the rest of the peers and will never catch up due to state transfer + # being disabled. + enabled: false + # checkInterval interval to check whether peer is lagging behind enough to + # request blocks via state transfer from another peer. + checkInterval: 10s + # responseTimeout amount of time to wait for state transfer response from + # other peers + responseTimeout: 3s + # batchSize the number of blocks to request via state transfer from another peer + batchSize: 10 + # blockBufferSize reflects the size of the re-ordering buffer + # which captures blocks and takes care to deliver them in order + # down to the ledger layer. 
The actual buffer size is bounded between + # 0 and 2*blockBufferSize, each channel maintains its own buffer + blockBufferSize: 20 + # maxRetries maximum number of re-tries to ask + # for single state transfer request + maxRetries: 3 + + # TLS Settings + tls: + # Require server-side TLS + enabled: false + # Require client certificates / mutual TLS for inbound connections. + # Note that clients that are not configured to use a certificate will + # fail to connect to the peer. + clientAuthRequired: false + # X.509 certificate used for TLS server + cert: + file: tls/server.crt + # Private key used for TLS server + key: + file: tls/server.key + # rootcert.file represents the trusted root certificate chain used for verifying certificates + # of other nodes during outbound connections. + # It is not required to be set, but can be used to augment the set of TLS CA certificates + # available from the MSPs of each channel’s configuration. + rootcert: + file: tls/ca.crt + # If mutual TLS is enabled, clientRootCAs.files contains a list of additional root certificates + # used for verifying certificates of client connections. + # It augments the set of TLS CA certificates available from the MSPs of each channel’s configuration. + # Minimally, set your organization's TLS CA root certificate so that the peer can receive join channel requests. + clientRootCAs: + files: + - tls/ca.crt + # Private key used for TLS when making client connections. + # If not set, peer.tls.key.file will be used instead + clientKey: + file: + # X.509 certificate used for TLS when making client connections. 
+ # If not set, peer.tls.cert.file will be used instead + clientCert: + file: + + # Authentication contains configuration parameters related to authenticating + # client messages + authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + timewindow: 15m + + # Path on the file system where peer will store data (eg ledger). This + # location must be access control protected to prevent unintended + # modification that might corrupt the peer operations. + # The path may be relative to FABRIC_CFG_PATH or an absolute path. + fileSystemPath: /var/hyperledger/production + + # BCCSP (Blockchain crypto provider): Select which crypto implementation or + # library to use + BCCSP: + Default: SW + # Settings for the SW crypto provider (i.e. when DEFAULT: SW) + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of Key Store + FileKeyStore: + # If "", defaults to 'mspConfigPath'/keystore + KeyStore: + # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: + # Token Label + Label: + # User PIN + Pin: + Hash: + Security: + SoftwareVerify: + Immutable: + AltID: + KeyIds: + + # Path on the file system where peer will find MSP local configurations + # The path may be relative to FABRIC_CFG_PATH or an absolute path. + mspConfigPath: msp + + # Identifier of the local MSP + # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- + # Deployers need to change the value of the localMspId string. + # In particular, the name of the local MSP ID of a peer needs + # to match the name of one of the MSPs in each of the channel + # that this peer is a member of. 
Otherwise this peer's messages + # will not be identified as valid by other nodes. + localMspId: SampleOrg + + # CLI common client config options + client: + # connection timeout + connTimeout: 3s + + # Delivery service related config + deliveryclient: + # Enables this peer to disseminate blocks it pulled from the ordering service + # via gossip. + # Note that 'gossip.state.enabled' controls point to point block replication + # of blocks committed in the past. + blockGossipEnabled: true + # It sets the total time the delivery service may spend in reconnection + # attempts until its retry logic gives up and returns an error, + # ignored if peer is a static leader + reconnectTotalTimeThreshold: 3600s + + # It sets the delivery service <-> ordering service node connection timeout + connTimeout: 3s + + # It sets the delivery service maximal delay between consecutive retries. + # Time between retries will have exponential backoff until hitting this threshold. + reConnectBackoffThreshold: 3600s + + # A list of orderer endpoint addresses which should be overridden + # when found in channel configurations. + addressOverrides: + # - from: + # to: + # caCertsFile: + # - from: + # to: + # caCertsFile: + + # Type for the local MSP - by default it's of type bccsp + localMspType: bccsp + + # Used with Go profiling tools only in none production environment. 
In + # production, it should be disabled (eg enabled: false) + profile: + enabled: false + listenAddress: 0.0.0.0:6060 + + # Handlers defines custom handlers that can filter and mutate + # objects passing within the peer, such as: + # Auth filter - reject or forward proposals from clients + # Decorators - append or mutate the chaincode input passed to the chaincode + # Endorsers - Custom signing over proposal response payload and its mutation + # Valid handler definition contains: + # - A name which is a factory method name defined in + # core/handlers/library/library.go for statically compiled handlers + # - library path to shared object binary for pluggable filters + # Auth filters and decorators are chained and executed in the order that + # they are defined. For example: + # authFilters: + # - + # name: FilterOne + # library: /opt/lib/filter.so + # - + # name: FilterTwo + # decorators: + # - + # name: DecoratorOne + # - + # name: DecoratorTwo + # library: /opt/lib/decorator.so + # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. + # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality + # as the default ESCC. + # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar + # to auth filters and decorators. + # endorsers: + # escc: + # name: DefaultESCC + # library: /etc/hyperledger/fabric/plugin/escc.so + handlers: + authFilters: + - + name: DefaultAuth + - + name: ExpirationCheck # This filter checks identity x509 certificate expiration + decorators: + - + name: DefaultDecorator + endorsers: + escc: + name: DefaultEndorsement + library: + validators: + vscc: + name: DefaultValidation + library: + + # library: /etc/hyperledger/fabric/plugin/escc.so + # Number of goroutines that will execute transaction validation in parallel. 
+ # By default, the peer chooses the number of CPUs on the machine. Set this + # variable to override that choice. + # NOTE: overriding this value might negatively influence the performance of + # the peer so please change this value only if you know what you're doing + validatorPoolSize: + + # The discovery service is used by clients to query information about peers, + # such as - which peers have joined a certain channel, what is the latest + # channel config, and most importantly - given a chaincode and a channel, + # what possible sets of peers satisfy the endorsement policy. + discovery: + enabled: true + # Whether the authentication cache is enabled or not. + authCacheEnabled: true + # The maximum size of the cache, after which a purge takes place + authCacheMaxSize: 1000 + # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation + authCachePurgeRetentionRatio: 0.75 + # Whether to allow non-admins to perform non channel scoped queries. + # When this is false, it means that only peer admins can perform non channel scoped queries. + orgMembersAllowedAccess: false + + # Limits is used to configure some internal resource limits. + limits: + # Concurrency limits the number of concurrently running requests to a service on each peer. + # Currently this option is only applied to endorser service and deliver service. + # When the property is missing or the value is 0, the concurrency limit is disabled for the service. + concurrency: + # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, + # including both user chaincodes and system chaincodes. + endorserService: 2500 + # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. + deliverService: 2500 + # gatewayService limits concurrent requests to gateway service that handles the submission and evaluation of transactions. 
+ gatewayService: 500 + + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes GRPC server and client can receive + maxRecvMsgSize: 104857600 + # Max message size in bytes GRPC server and client can send + maxSendMsgSize: 104857600 + +############################################################################### +# +# VM section +# +############################################################################### +vm: + + # Endpoint of the vm management system. For docker can be one of the following in general + # unix:///var/run/docker.sock + # http://localhost:2375 + # https://localhost:2376 + # If you utilize external chaincode builders and don't need the default Docker chaincode builder, + # the endpoint should be unconfigured so that the peer's Docker health checker doesn't get registered. + endpoint: unix:///var/run/docker.sock + + # settings for docker vms + docker: + tls: + enabled: false + ca: + file: docker/ca.crt + cert: + file: docker/tls.crt + key: + file: docker/tls.key + + # Enables/disables the standard out/err from chaincode containers for + # debugging purposes + attachStdout: false + + # Parameters on creating docker container. + # Container may be efficiently created using ipam & dns-server for cluster + # NetworkMode - sets the networking mode for the container. Supported + # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. + # Dns - a list of DNS servers for the container to use. + # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of + # Docker Host Config are not supported and will not be used if set. + # LogConfig - sets the logging driver (Type) and related options + # (Config) for Docker. For more info, + # https://docs.docker.com/engine/admin/logging/overview/ + # Note: Set LogConfig using Environment Variables is not supported. 
+ hostConfig: + NetworkMode: host + Dns: + # - 192.168.0.1 + LogConfig: + Type: json-file + Config: + max-size: "50m" + max-file: "5" + Memory: 2147483648 + +############################################################################### +# +# Chaincode section +# +############################################################################### +chaincode: + + # The id is used by the Chaincode stub to register the executing Chaincode + # ID with the Peer and is generally supplied through ENV variables + # the `path` form of ID is provided when installing the chaincode. + # The `name` is used for all other requests and can be any string. + id: + path: + name: + + # Generic builder environment, suitable for most chaincode types + builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) + + # Enables/disables force pulling of the base docker images (listed below) + # during user chaincode instantiation. + # Useful when using moving image tags (such as :latest) + pull: false + + golang: + # golang will never need more than baseos + runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) + + # whether or not golang chaincode should be linked dynamically + dynamicLink: false + + java: + # This is an image based on java:openjdk-8 with addition compiler + # tools added for java shim layer packaging. + # This image is packed with shim layer libraries that are necessary + # for Java chaincode runtime. + runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) + + node: + # This is an image based on node:$(NODE_VER)-alpine + runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) + + # List of directories to treat as external builders and launchers for + # chaincode. The external builder detection processing will iterate over the + # builders in the order specified below. + # If you don't need to fallback to the default Docker builder, also unconfigure vm.endpoint above. 
+ # To override this property via env variable use CORE_CHAINCODE_EXTERNALBUILDERS: [{name: x, path: dir1}, {name: y, path: dir2}] + # The path must be an absolute path. + externalBuilders: + - name: ccaas_builder + path: /opt/hyperledger/ccaas_builder + propagateEnvironment: + - CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG + + + # The maximum duration to wait for the chaincode build and install process + # to complete. + installTimeout: 300s + + # Timeout duration for starting up a container and waiting for Register + # to come through. + startuptimeout: 300s + + # Timeout duration for Invoke and Init calls to prevent runaway. + # This timeout is used by all chaincodes in all the channels, including + # system chaincodes. + # Note that during Invoke, if the image is not available (e.g. being + # cleaned up when in development environment), the peer will automatically + # build the image, which might take more time. In production environment, + # the chaincode image is unlikely to be deleted, so the timeout could be + # reduced accordingly. + executetimeout: 30s + + # There are 2 modes: "dev" and "net". + # In dev mode, user runs the chaincode after starting peer from + # command line on local machine. + # In net mode, peer will run chaincode in a docker container. + mode: net + + # keepalive in seconds. In situations where the communication goes through a + # proxy that does not support keep-alive, this parameter will maintain connection + # between peer and chaincode. 
+ # A value <= 0 turns keepalive off + keepalive: 0 + + # enabled system chaincodes + system: + _lifecycle: enable + cscc: enable + lscc: enable + qscc: enable + + # Logging section for the chaincode container + logging: + # Default level for all loggers within the chaincode container + level: info + # Override default level for the 'shim' logger + shim: warning + # Format for the chaincode container logs + format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' + +############################################################################### +# +# Ledger section - ledger configuration encompasses both the blockchain +# and the state +# +############################################################################### +ledger: + + blockchain: + + state: + # stateDatabase - options are "goleveldb", "CouchDB" + # goleveldb - default state database stored in goleveldb. + # CouchDB - store state database in CouchDB + stateDatabase: goleveldb + # Limit on the number of records to return per query + totalQueryLimit: 100000 + couchDBConfig: + # It is recommended to run CouchDB on the same server as the peer, and + # not map the CouchDB container port to a server port in docker-compose. + # Otherwise proper security must be provided on the connection between + # CouchDB client (on the peer) and server. + couchDBAddress: 127.0.0.1:5984 + # This username must have read and write authority on CouchDB + username: + # The password is recommended to pass as an environment variable + # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). + # If it is stored here, the file must be access control protected + # to prevent unintended users from discovering the password. + password: + # Number of retries for CouchDB errors + maxRetries: 3 + # Number of retries for CouchDB errors during peer startup. + # The delay between retries doubles for each attempt. 
+ # Default of 10 retries results in 11 attempts over 2 minutes. + maxRetriesOnStartup: 10 + # CouchDB request timeout (unit: duration, e.g. 20s) + requestTimeout: 35s + # Limit on the number of records per each CouchDB query + # Note that chaincode queries are only bound by totalQueryLimit. + # Internally the chaincode may execute multiple CouchDB queries, + # each of size internalQueryLimit. + internalQueryLimit: 1000 + # Limit on the number of records per CouchDB bulk update batch + maxBatchUpdateSize: 1000 + # Create the _global_changes system database + # This is optional. Creating the global changes database will require + # additional system resources to track changes and maintain the database + createGlobalChangesDB: false + # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state + # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple + # of 32 MB, the peer would round the size to the next multiple of 32 MB. + # To disable the cache, 0 MB needs to be assigned to the cacheSize. + cacheSize: 64 + + history: + # enableHistoryDatabase - options are true or false + # Indicates if the history of key updates should be stored. + # All history 'index' will be stored in goleveldb, regardless if using + # CouchDB or alternate database for the state. + enableHistoryDatabase: true + + pvtdataStore: + # the maximum db batch size for converting + # the ineligible missing data entries to eligible missing data entries + collElgProcMaxDbBatchSize: 5000 + # the minimum duration (in milliseconds) between writing + # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries + collElgProcDbBatchesInterval: 1000 + # The missing data entries are classified into two categories: + # (1) prioritized + # (2) deprioritized + # Initially, all missing data are in the prioritized list. 
When the + # reconciler is unable to fetch the missing data from other peers, + # the unreconciled missing data would be moved to the deprioritized list. + # The reconciler would retry deprioritized missing data after every + # deprioritizedDataReconcilerInterval (unit: minutes). Note that the + # interval needs to be greater than the reconcileSleepInterval + deprioritizedDataReconcilerInterval: 60m + # The frequency to purge private data (in number of blocks). + # Private data is purged from the peer's private data store based on + # the collection property blockToLive or an explicit chaincode call to PurgePrivateData(). + purgeInterval: 100 + # Whether to log private data keys purged from private data store (INFO level) when explicitly purged via chaincode + purgedKeyAuditLogging: true + + snapshots: + # Path on the file system where peer will store ledger snapshots + # The path must be an absolute path. + rootDir: /var/hyperledger/production/snapshots + +############################################################################### +# +# Operations section +# +############################################################################### +operations: + # host and port for the operations server + listenAddress: 127.0.0.1:9443 + + # TLS configuration for the operations endpoint + tls: + # TLS enabled + enabled: false + + # path to PEM encoded server certificate for the operations server + # The paths in this section may be relative to FABRIC_CFG_PATH or an absolute path. + cert: + file: + + # path to PEM encoded server key for the operations server + key: + file: + + # most operations service endpoints require client authentication when TLS + # is enabled. clientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ clientAuthRequired: false + + # paths to PEM encoded ca certificates to trust for client authentication + clientRootCAs: + files: [] + +############################################################################### +# +# Metrics section +# +############################################################################### +metrics: + # metrics provider is one of statsd, prometheus, or disabled + provider: disabled + + # statsd configuration + statsd: + # network type: tcp or udp + network: udp + + # statsd server address + address: 127.0.0.1:8125 + + # the interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + writeInterval: 10s + + # prefix is prepended to all emitted statsd metrics + prefix: \ No newline at end of file diff --git a/src/api-engine/cello/node/orderer.yaml.bak b/src/api-engine/cello/node/orderer.yaml.bak new file mode 100644 index 000000000..6c555f93e --- /dev/null +++ b/src/api-engine/cello/node/orderer.yaml.bak @@ -0,0 +1,427 @@ +# Copyright IBM Corp. All Rights Reserved. +# +# SPDX-License-Identifier: Apache-2.0 +# + +--- +################################################################################ +# +# Orderer Configuration +# +# - This controls the type and configuration of the orderer. +# +################################################################################ +General: + # Listen address: The IP on which to bind to listen. + ListenAddress: 127.0.0.1 + + # Listen port: The port on which to bind to listen. + ListenPort: 7050 + + # TLS: TLS settings for the GRPC server. + TLS: + # Require server-side TLS + Enabled: false + # PrivateKey governs the file location of the private key of the TLS certificate. + PrivateKey: tls/server.key + # Certificate governs the file location of the server TLS certificate. 
+ Certificate: tls/server.crt + # RootCAs contains a list of additional root certificates used for verifying certificates + # of other orderer nodes during outbound connections. + # It is not required to be set, but can be used to augment the set of TLS CA certificates + # available from the MSPs of each channel’s configuration. + RootCAs: + - tls/ca.crt + # Require client certificates / mutual TLS for inbound connections. + ClientAuthRequired: false + # If mutual TLS is enabled, ClientRootCAs contains a list of additional root certificates + # used for verifying certificates of client connections. + # It is not required to be set, but can be used to augment the set of TLS CA certificates + # available from the MSPs of each channel’s configuration. + ClientRootCAs: + # Keepalive settings for the GRPC server. + Keepalive: + # ServerMinInterval is the minimum permitted time between client pings. + # If clients send pings more frequently, the server will + # disconnect them. + ServerMinInterval: 60s + # ServerInterval is the time between pings to clients. + ServerInterval: 7200s + # ServerTimeout is the duration the server waits for a response from + # a client before closing the connection. + ServerTimeout: 20s + + # Since all nodes should be consistent it is recommended to keep + # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize + # Max message size in bytes the GRPC server and client can receive + MaxRecvMsgSize: 104857600 + # Max message size in bytes the GRPC server and client can send + MaxSendMsgSize: 104857600 + + # Cluster settings for ordering service nodes that communicate with other ordering service nodes + # such as Raft based ordering service. + Cluster: + # SendBufferSize is the maximum number of messages in the egress buffer. + # Consensus messages are dropped if the buffer is full, and transaction + # messages are waiting for space to be freed. 
+ SendBufferSize: 100 + + # ClientCertificate governs the file location of the client TLS certificate + # used to establish mutual TLS connections with other ordering service nodes. + # If not set, the server General.TLS.Certificate is re-used. + ClientCertificate: + # ClientPrivateKey governs the file location of the private key of the client TLS certificate. + # If not set, the server General.TLS.PrivateKey is re-used. + ClientPrivateKey: + + # The below 4 properties should be either set together, or be unset together. + # If they are set, then the orderer node uses a separate listener for intra-cluster + # communication. If they are unset, then the general orderer listener is used. + # This is useful if you want to use a different TLS server certificates on the + # client-facing and the intra-cluster listeners. + + # ListenPort defines the port on which the cluster listens to connections. + ListenPort: + # ListenAddress defines the IP on which to listen to intra-cluster communication. + ListenAddress: + # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster + # communication. + ServerCertificate: + # ServerPrivateKey defines the file location of the private key of the TLS certificate. + ServerPrivateKey: + + # Bootstrap method: The method by which to obtain the bootstrap block + # system channel is specified. The option can be one of: + # "file" - path to a file containing the genesis block or config block of system channel + # "none" - allows an orderer to start without a system channel configuration + BootstrapMethod: file + + # Bootstrap file: The file containing the bootstrap block to use when + # initializing the orderer system channel and BootstrapMethod is set to + # "file". The bootstrap file can be the genesis block, and it can also be + # a config block for late bootstrap of some consensus methods like Raft. 
+ # Generate a genesis block by updating $FABRIC_CFG_PATH/configtx.yaml and + # using configtxgen command with "-outputBlock" option. + # Defaults to file "genesisblock" (in $FABRIC_CFG_PATH directory) if not specified. + BootstrapFile: + + # LocalMSPDir is where to find the private crypto material needed by the + # orderer. It is set relative here as a default for dev environments but + # should be changed to the real location in production. + LocalMSPDir: msp + + # LocalMSPID is the identity to register the local MSP material with the MSP + # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP + # ID of one of the organizations defined in the orderer system channel's + # /Channel/Orderer configuration. The sample organization defined in the + # sample configuration provided has an MSP ID of "SampleOrg". + LocalMSPID: SampleOrg + + # Enable an HTTP service for Go "pprof" profiling as documented at: + # https://golang.org/pkg/net/http/pprof + Profile: + Enabled: false + Address: 0.0.0.0:6060 + + # BCCSP configures the blockchain crypto service providers. + BCCSP: + # Default specifies the preferred blockchain crypto service provider + # to use. If the preferred provider is not available, the software + # based provider ("SW") will be used. + # Valid providers are: + # - SW: a software based crypto provider + # - PKCS11: a CA hardware security module crypto provider. + Default: SW + + # SW configures the software based blockchain crypto provider. + SW: + # TODO: The default Hash and Security level needs refactoring to be + # fully configurable. Changing these defaults requires coordination + # SHA2 is hardcoded in several places, not only BCCSP + Hash: SHA2 + Security: 256 + # Location of key store. If this is unset, a location will be + # chosen using: 'LocalMSPDir'/keystore + FileKeyStore: + KeyStore: + + # Settings for the PKCS#11 crypto provider (i.e. 
when DEFAULT: PKCS11) + PKCS11: + # Location of the PKCS11 module library + Library: + # Token Label + Label: + # User PIN + Pin: + Hash: + Security: + FileKeyStore: + KeyStore: + + # Authentication contains configuration parameters related to authenticating + # client messages + Authentication: + # the acceptable difference between the current server time and the + # client's time as specified in a client request message + TimeWindow: 15m + + +################################################################################ +# +# SECTION: File Ledger +# +# - This section applies to the configuration of the file ledger. +# +################################################################################ +FileLedger: + + # Location: The directory to store the blocks in. + Location: /var/hyperledger/production/orderer + +################################################################################ +# +# SECTION: Kafka +# +# - This section applies to the configuration of the Kafka-based orderer, and +# its interaction with the Kafka cluster. +# +################################################################################ +Kafka: + + # Retry: What do if a connection to the Kafka cluster cannot be established, + # or if a metadata request to the Kafka cluster needs to be repeated. + Retry: + # When a new channel is created, or when an existing channel is reloaded + # (in case of a just-restarted orderer), the orderer interacts with the + # Kafka cluster in the following ways: + # 1. It creates a Kafka producer (writer) for the Kafka partition that + # corresponds to the channel. + # 2. It uses that producer to post a no-op CONNECT message to that + # partition + # 3. It creates a Kafka consumer (reader) for that partition. + # If any of these steps fail, they will be re-attempted every + # for a total of , and then every + # for a total of until they succeed. 
+ # Note that the orderer will be unable to write to or read from a + # channel until all of the steps above have been completed successfully. + ShortInterval: 5s + ShortTotal: 10m + LongInterval: 5m + LongTotal: 12h + # Affects the socket timeouts when waiting for an initial connection, a + # response, or a transmission. See Config.Net for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + NetworkTimeouts: + DialTimeout: 10s + ReadTimeout: 10s + WriteTimeout: 10s + # Affects the metadata requests when the Kafka cluster is in the middle + # of a leader election.See Config.Metadata for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Metadata: + RetryBackoff: 250ms + RetryMax: 3 + # What to do if posting a message to the Kafka cluster fails. See + # Config.Producer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Producer: + RetryBackoff: 100ms + RetryMax: 3 + # What to do if reading from the Kafka cluster fails. See + # Config.Consumer for more info: + # https://godoc.org/github.com/Shopify/sarama#Config + Consumer: + RetryBackoff: 2s + # Settings to use when creating Kafka topics. Only applies when + # Kafka.Version is v0.10.1.0 or higher + Topic: + # The number of Kafka brokers across which to replicate the topic + ReplicationFactor: 3 + # Verbose: Enable logging for interactions with the Kafka cluster. + Verbose: false + + # TLS: TLS settings for the orderer's connection to the Kafka cluster. + TLS: + + # Enabled: Use TLS when connecting to the Kafka cluster. + Enabled: false + + # PrivateKey: PEM-encoded private key the orderer will use for + # authentication. + PrivateKey: + # As an alternative to specifying the PrivateKey here, uncomment the + # following "File" key and specify the file name from which to load the + # value of PrivateKey. + #File: path/to/PrivateKey + + # Certificate: PEM-encoded signed public key certificate the orderer will + # use for authentication. 
+ Certificate: + # As an alternative to specifying the Certificate here, uncomment the + # following "File" key and specify the file name from which to load the + # value of Certificate. + #File: path/to/Certificate + + # RootCAs: PEM-encoded trusted root certificates used to validate + # certificates from the Kafka cluster. + RootCAs: + # As an alternative to specifying the RootCAs here, uncomment the + # following "File" key and specify the file name from which to load the + # value of RootCAs. + #File: path/to/RootCAs + + # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers + SASLPlain: + # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers + Enabled: false + # User: Required when Enabled is set to true + User: + # Password: Required when Enabled is set to true + Password: + + # Kafka protocol version used to communicate with the Kafka cluster brokers + # (defaults to 0.10.2.0 if not specified) + Version: + +################################################################################ +# +# Debug Configuration +# +# - This controls the debugging options for the orderer +# +################################################################################ +Debug: + + # BroadcastTraceDir when set will cause each request to the Broadcast service + # for this orderer to be written to a file in this directory + BroadcastTraceDir: + + # DeliverTraceDir when set will cause each request to the Deliver service + # for this orderer to be written to a file in this directory + DeliverTraceDir: + +################################################################################ +# +# Operations Configuration +# +# - This configures the operations server endpoint for the orderer +# +################################################################################ +Operations: + # host and port for the operations server + ListenAddress: 127.0.0.1:8443 + + # TLS configuration for the operations endpoint + TLS: + # TLS enabled + Enabled: false + + 
# Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most operations service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. + ClientAuthRequired: false + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Metrics Configuration +# +# - This configures metrics collection for the orderer +# +################################################################################ +Metrics: + # The metrics provider is one of statsd, prometheus, or disabled + Provider: disabled + + # The statsd configuration + Statsd: + # network type: tcp or udp + Network: udp + + # the statsd server address + Address: 127.0.0.1:8125 + + # The interval at which locally cached counters and gauges are pushed + # to statsd; timings are pushed immediately + WriteInterval: 30s + + # The prefix is prepended to all emitted statsd metrics + Prefix: + +################################################################################ +# +# Admin Configuration +# +# - This configures the admin server endpoint for the orderer +# +################################################################################ +Admin: + # host and port for the admin server + ListenAddress: 127.0.0.1:9443 + + # TLS configuration for the admin endpoint + TLS: + # TLS enabled + Enabled: false + + # Certificate is the location of the PEM encoded TLS certificate + Certificate: + + # PrivateKey points to the location of the PEM-encoded key + PrivateKey: + + # Most admin service endpoints require client authentication when TLS + # is enabled. ClientAuthRequired requires client certificate authentication + # at the TLS layer to access all resources. 
+ # + # NOTE: When TLS is enabled, the admin endpoint requires mutual TLS. The + # orderer will panic on startup if this value is set to false. + ClientAuthRequired: true + + # Paths to PEM encoded ca certificates to trust for client authentication + ClientRootCAs: [] + +################################################################################ +# +# Channel participation API Configuration +# +# - This provides the channel participation API configuration for the orderer. +# - Channel participation uses the ListenAddress and TLS settings of the Admin +# service. +# +################################################################################ +ChannelParticipation: + # Channel participation API is enabled. + Enabled: false + + # The maximum size of the request body when joining a channel. + MaxRequestBodySize: 1 MB + + +################################################################################ +# +# Consensus Configuration +# +# - This section contains config options for a consensus plugin. It is opaque +# to orderer, and completely up to consensus implementation to make use of. +# +################################################################################ +Consensus: + # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, + # we use following options: + + # WALDir specifies the location at which Write Ahead Logs for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. + WALDir: /var/hyperledger/production/orderer/etcdraft/wal + + # SnapDir specifies the location at which snapshots for etcd/raft are + # stored. Each channel will have its own subdir named after channel ID. 
+ SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot diff --git a/src/api-engine/entrypoint.sh b/src/api-engine/entrypoint.sh index 76cad69cd..85f68ba36 100755 --- a/src/api-engine/entrypoint.sh +++ b/src/api-engine/entrypoint.sh @@ -1,27 +1,15 @@ #!/usr/bin/env bash -#bash /scripts/initial.sh; - -echo "Generating the settings.py for api_engine" -LOCAL_SETTINGS="/var/www/server/api_engine/settings.py" -RAW_LOCAL_SETTINGS="/var/www/server/api_engine/settings.py.example" - -envsubst < ${RAW_LOCAL_SETTINGS} > ${LOCAL_SETTINGS} - -holdup -t 120 tcp://${DB_HOST}:${DB_PORT}; -if [[ "$RUN_MODE" == "server" ]]; then - python manage.py migrate; - python manage.py create_user \ - --username ${API_ENGINE_ADMIN_USERNAME:-admin} \ - --password ${API_ENGINE_ADMIN_PASSWORD:-pass} \ - --email ${API_ENGINE_ADMIN_EMAIL:-admin@cello.com} \ - --is_superuser \ - --role admin - if [[ "$DEBUG" == "True" ]]; then # For dev, use pure Django directly - python manage.py runserver 0.0.0.0:8080; - else # For production, use uwsgi in front - uwsgi --ini /etc/uwsgi/apps-enabled/server.ini; - fi -else - celery -A api_engine worker -l info +holdup -t 120 tcp://${DB_HOST:-localhost}:${DB_PORT:-5432}; +python manage.py migrate; +python manage.py create_user \ + --username ${API_ENGINE_ADMIN_EMAIL:-admin@cello.com} \ + --password ${API_ENGINE_ADMIN_PASSWORD:-pass} \ + --email ${API_ENGINE_ADMIN_EMAIL:-admin@cello.com} \ + --is_superuser \ + --role admin +if [[ "${DEBUG:-True,,}" == "true" ]]; then # For dev, use pure Django directly + python manage.py runserver 0.0.0.0:8080; +else # For production, use uwsgi in front + uwsgi --ini server.ini; fi From f9f1b5f37a32c401e2e85d57f16c05d887f17dda Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 09:19:34 +0800 Subject: [PATCH 12/67] Remove Fabric Node templates Move inside api engine. 
Signed-off-by: dodo920306 --- template/node/core.yaml.bak | 800 --------------------------------- template/node/orderer.yaml.bak | 427 ------------------ 2 files changed, 1227 deletions(-) delete mode 100644 template/node/core.yaml.bak delete mode 100644 template/node/orderer.yaml.bak diff --git a/template/node/core.yaml.bak b/template/node/core.yaml.bak deleted file mode 100644 index 7809b001d..000000000 --- a/template/node/core.yaml.bak +++ /dev/null @@ -1,800 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. -# -# SPDX-License-Identifier: Apache-2.0 -# - -############################################################################### -# -# Peer section -# -############################################################################### -peer: - - # The peer id provides a name for this peer instance and is used when - # naming docker resources. - id: jdoe - - # The networkId allows for logical separation of networks and is used when - # naming docker resources. - networkId: dev - - # The Address at local network interface this Peer will listen on. - # By default, it will listen on all network interfaces - listenAddress: 0.0.0.0:7051 - - # The endpoint this peer uses to listen for inbound chaincode connections. - # If this is commented-out, the listen address is selected to be - # the peer's address (see below) with port 7052 - # chaincodeListenAddress: 0.0.0.0:7052 - - # The endpoint the chaincode for this peer uses to connect to the peer. - # If this is not specified, the chaincodeListenAddress address is selected. - # And if chaincodeListenAddress is not specified, address is selected from - # peer address (see below). If specified peer address is invalid then it - # will fallback to the auto detected IP (local IP) regardless of the peer - # addressAutoDetect value. - # chaincodeAddress: 0.0.0.0:7052 - - # When used as peer config, this represents the endpoint to other peers - # in the same organization. 
For peers in other organization, see - # gossip.externalEndpoint for more info. - # When used as CLI config, this means the peer's endpoint to interact with - address: 0.0.0.0:7051 - - # Whether the Peer should programmatically determine its address - # This case is useful for docker containers. - # When set to true, will override peer address. - addressAutoDetect: false - - # Settings for the Peer's gateway server. - gateway: - # Whether the gateway is enabled for this Peer. - enabled: true - # endorsementTimeout is the duration the gateway waits for a response - # from other endorsing peers before returning a timeout error to the client. - endorsementTimeout: 30s - # broadcastTimeout is the duration the gateway waits for a response - # from ordering nodes before returning a timeout error to the client. - broadcastTimeout: 30s - # dialTimeout is the duration the gateway waits for a connection - # to other network nodes. - dialTimeout: 2m - - - # Keepalive settings for peer server and clients - keepalive: - # Interval is the duration after which if the server does not see - # any activity from the client it pings the client to see if it's alive - interval: 7200s - # Timeout is the duration the server waits for a response - # from the client after sending a ping before closing the connection - timeout: 20s - # MinInterval is the minimum permitted time between client pings. - # If clients send pings more frequently, the peer server will - # disconnect them - minInterval: 60s - # Client keepalive settings for communicating with other peer nodes - client: - # Interval is the time between pings to peer nodes. This must - # greater than or equal to the minInterval specified by peer - # nodes - interval: 60s - # Timeout is the duration the client waits for a response from - # peer nodes before closing the connection - timeout: 20s - # DeliveryClient keepalive settings for communication with ordering - # nodes. 
- deliveryClient: - # Interval is the time between pings to ordering nodes. This must - # greater than or equal to the minInterval specified by ordering - # nodes. - interval: 60s - # Timeout is the duration the client waits for a response from - # ordering nodes before closing the connection - timeout: 20s - - - # Gossip related configuration - gossip: - # Bootstrap set to initialize gossip with. - # This is a list of other peers that this peer reaches out to at startup. - # Important: The endpoints here have to be endpoints of peers in the same - # organization, because the peer would refuse connecting to these endpoints - # unless they are in the same organization as the peer. - bootstrap: 127.0.0.1:7051 - - # NOTE: orgLeader and useLeaderElection parameters are mutual exclusive. - # Setting both to true would result in the termination of the peer - # since this is undefined state. If the peers are configured with - # useLeaderElection=false, make sure there is at least 1 peer in the - # organization that its orgLeader is set to true. - - # Defines whenever peer will initialize dynamic algorithm for - # "leader" selection, where leader is the peer to establish - # connection with ordering service and use delivery protocol - # to pull ledger blocks from ordering service. - useLeaderElection: false - # Statically defines peer to be an organization "leader", - # where this means that current peer will maintain connection - # with ordering service and disseminate block across peers in - # its own organization. Multiple peers or all peers in an organization - # may be configured as org leaders, so that they all pull - # blocks directly from ordering service. - orgLeader: true - - # Interval for membershipTracker polling - membershipTrackerInterval: 5s - - # Overrides the endpoint that the peer publishes to peers - # in its organization. 
For peers in foreign organizations - # see 'externalEndpoint' - endpoint: - # Maximum count of blocks stored in memory - maxBlockCountToStore: 10 - # Max time between consecutive message pushes(unit: millisecond) - maxPropagationBurstLatency: 10ms - # Max number of messages stored until a push is triggered to remote peers - maxPropagationBurstSize: 10 - # Number of times a message is pushed to remote peers - propagateIterations: 1 - # Number of peers selected to push messages to - propagatePeerNum: 3 - # Determines frequency of pull phases(unit: second) - # Must be greater than digestWaitTime + responseWaitTime - pullInterval: 4s - # Number of peers to pull from - pullPeerNum: 3 - # Determines frequency of pulling state info messages from peers(unit: second) - requestStateInfoInterval: 4s - # Determines frequency of pushing state info messages to peers(unit: second) - publishStateInfoInterval: 4s - # Maximum time a stateInfo message is kept until expired - stateInfoRetentionInterval: - # Time from startup certificates are included in Alive messages(unit: second) - publishCertPeriod: 10s - # Should we skip verifying block messages or not (currently not in use) - skipBlockVerification: false - # Dial timeout(unit: second) - dialTimeout: 3s - # Connection timeout(unit: second) - connTimeout: 2s - # Buffer size of received messages - recvBuffSize: 20 - # Buffer size of sending messages - sendBuffSize: 200 - # Time to wait before pull engine processes incoming digests (unit: second) - # Should be slightly smaller than requestWaitTime - digestWaitTime: 1s - # Time to wait before pull engine removes incoming nonce (unit: milliseconds) - # Should be slightly bigger than digestWaitTime - requestWaitTime: 1500ms - # Time to wait before pull engine ends pull (unit: second) - responseWaitTime: 2s - # Alive check interval(unit: second) - aliveTimeInterval: 5s - # Alive expiration timeout(unit: second) - aliveExpirationTimeout: 25s - # Reconnect interval(unit: second) - 
reconnectInterval: 25s - # Max number of attempts to connect to a peer - maxConnectionAttempts: 120 - # Message expiration factor for alive messages - msgExpirationFactor: 20 - # This is an endpoint that is published to peers outside of the organization. - # If this isn't set, the peer will not be known to other organizations and will not be exposed via service discovery. - externalEndpoint: - # Leader election service configuration - election: - # Longest time peer waits for stable membership during leader election startup (unit: second) - startupGracePeriod: 15s - # Interval gossip membership samples to check its stability (unit: second) - membershipSampleInterval: 1s - # Time passes since last declaration message before peer decides to perform leader election (unit: second) - leaderAliveThreshold: 10s - # Time between peer sends propose message and declares itself as a leader (sends declaration message) (unit: second) - leaderElectionDuration: 5s - - pvtData: - # pullRetryThreshold determines the maximum duration of time private data corresponding for a given block - # would be attempted to be pulled from peers until the block would be committed without the private data - pullRetryThreshold: 60s - # As private data enters the transient store, it is associated with the peer's ledger's height at that time. - # transientstoreMaxBlockRetention defines the maximum difference between the current ledger's height upon commit, - # and the private data residing inside the transient store that is guaranteed not to be purged. - # Private data is purged from the transient store when blocks with sequences that are multiples - # of transientstoreMaxBlockRetention are committed. - transientstoreMaxBlockRetention: 1000 - # pushAckTimeout is the maximum time to wait for an acknowledgement from each peer - # at private data push at endorsement time. 
- pushAckTimeout: 3s - # Block to live pulling margin, used as a buffer - # to prevent peer from trying to pull private data - # from peers that is soon to be purged in next N blocks. - # This helps a newly joined peer catch up to current - # blockchain height quicker. - btlPullMargin: 10 - # the process of reconciliation is done in an endless loop, while in each iteration reconciler tries to - # pull from the other peers the most recent missing blocks with a maximum batch size limitation. - # reconcileBatchSize determines the maximum batch size of missing private data that will be reconciled in a - # single iteration. - reconcileBatchSize: 10 - # reconcileSleepInterval determines the time reconciler sleeps from end of an iteration until the beginning - # of the next reconciliation iteration. - reconcileSleepInterval: 1m - # reconciliationEnabled is a flag that indicates whether private data reconciliation is enable or not. - reconciliationEnabled: true - # skipPullingInvalidTransactionsDuringCommit is a flag that indicates whether pulling of invalid - # transaction's private data from other peers need to be skipped during the commit time and pulled - # only through reconciler. - skipPullingInvalidTransactionsDuringCommit: false - # implicitCollectionDisseminationPolicy specifies the dissemination policy for the peer's own implicit collection. - # When a peer endorses a proposal that writes to its own implicit collection, below values override the default values - # for disseminating private data. - # Note that it is applicable to all channels the peer has joined. The implication is that requiredPeerCount has to - # be smaller than the number of peers in a channel that has the lowest numbers of peers from the organization. - implicitCollectionDisseminationPolicy: - # requiredPeerCount defines the minimum number of eligible peers to which the peer must successfully - # disseminate private data for its own implicit collection during endorsement. Default value is 0. 
- requiredPeerCount: 0 - # maxPeerCount defines the maximum number of eligible peers to which the peer will attempt to - # disseminate private data for its own implicit collection during endorsement. Default value is 1. - maxPeerCount: 1 - - # Gossip state transfer related configuration - state: - # indicates whenever state transfer is enabled or not - # default value is false, i.e. state transfer is active - # and takes care to sync up missing blocks allowing - # lagging peer to catch up to speed with rest network. - # Keep in mind that when peer.gossip.useLeaderElection is true - # and there are several peers in the organization, - # or peer.gossip.useLeaderElection is false alongside with - # peer.gossip.orgleader being false, the peer's ledger may lag behind - # the rest of the peers and will never catch up due to state transfer - # being disabled. - enabled: false - # checkInterval interval to check whether peer is lagging behind enough to - # request blocks via state transfer from another peer. - checkInterval: 10s - # responseTimeout amount of time to wait for state transfer response from - # other peers - responseTimeout: 3s - # batchSize the number of blocks to request via state transfer from another peer - batchSize: 10 - # blockBufferSize reflects the size of the re-ordering buffer - # which captures blocks and takes care to deliver them in order - # down to the ledger layer. The actual buffer size is bounded between - # 0 and 2*blockBufferSize, each channel maintains its own buffer - blockBufferSize: 20 - # maxRetries maximum number of re-tries to ask - # for single state transfer request - maxRetries: 3 - - # TLS Settings - tls: - # Require server-side TLS - enabled: false - # Require client certificates / mutual TLS for inbound connections. - # Note that clients that are not configured to use a certificate will - # fail to connect to the peer. 
- clientAuthRequired: false - # X.509 certificate used for TLS server - cert: - file: tls/server.crt - # Private key used for TLS server - key: - file: tls/server.key - # rootcert.file represents the trusted root certificate chain used for verifying certificates - # of other nodes during outbound connections. - # It is not required to be set, but can be used to augment the set of TLS CA certificates - # available from the MSPs of each channel’s configuration. - rootcert: - file: tls/ca.crt - # If mutual TLS is enabled, clientRootCAs.files contains a list of additional root certificates - # used for verifying certificates of client connections. - # It augments the set of TLS CA certificates available from the MSPs of each channel’s configuration. - # Minimally, set your organization's TLS CA root certificate so that the peer can receive join channel requests. - clientRootCAs: - files: - - tls/ca.crt - # Private key used for TLS when making client connections. - # If not set, peer.tls.key.file will be used instead - clientKey: - file: - # X.509 certificate used for TLS when making client connections. - # If not set, peer.tls.cert.file will be used instead - clientCert: - file: - - # Authentication contains configuration parameters related to authenticating - # client messages - authentication: - # the acceptable difference between the current server time and the - # client's time as specified in a client request message - timewindow: 15m - - # Path on the file system where peer will store data (eg ledger). This - # location must be access control protected to prevent unintended - # modification that might corrupt the peer operations. - # The path may be relative to FABRIC_CFG_PATH or an absolute path. - fileSystemPath: /var/hyperledger/production - - # BCCSP (Blockchain crypto provider): Select which crypto implementation or - # library to use - BCCSP: - Default: SW - # Settings for the SW crypto provider (i.e. 
when DEFAULT: SW) - SW: - # TODO: The default Hash and Security level needs refactoring to be - # fully configurable. Changing these defaults requires coordination - # SHA2 is hardcoded in several places, not only BCCSP - Hash: SHA2 - Security: 256 - # Location of Key Store - FileKeyStore: - # If "", defaults to 'mspConfigPath'/keystore - KeyStore: - # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) - PKCS11: - # Location of the PKCS11 module library - Library: - # Token Label - Label: - # User PIN - Pin: - Hash: - Security: - SoftwareVerify: - Immutable: - AltID: - KeyIds: - - # Path on the file system where peer will find MSP local configurations - # The path may be relative to FABRIC_CFG_PATH or an absolute path. - mspConfigPath: msp - - # Identifier of the local MSP - # ----!!!!IMPORTANT!!!-!!!IMPORTANT!!!-!!!IMPORTANT!!!!---- - # Deployers need to change the value of the localMspId string. - # In particular, the name of the local MSP ID of a peer needs - # to match the name of one of the MSPs in each of the channel - # that this peer is a member of. Otherwise this peer's messages - # will not be identified as valid by other nodes. - localMspId: SampleOrg - - # CLI common client config options - client: - # connection timeout - connTimeout: 3s - - # Delivery service related config - deliveryclient: - # Enables this peer to disseminate blocks it pulled from the ordering service - # via gossip. - # Note that 'gossip.state.enabled' controls point to point block replication - # of blocks committed in the past. - blockGossipEnabled: true - # It sets the total time the delivery service may spend in reconnection - # attempts until its retry logic gives up and returns an error, - # ignored if peer is a static leader - reconnectTotalTimeThreshold: 3600s - - # It sets the delivery service <-> ordering service node connection timeout - connTimeout: 3s - - # It sets the delivery service maximal delay between consecutive retries. 
- # Time between retries will have exponential backoff until hitting this threshold. - reConnectBackoffThreshold: 3600s - - # A list of orderer endpoint addresses which should be overridden - # when found in channel configurations. - addressOverrides: - # - from: - # to: - # caCertsFile: - # - from: - # to: - # caCertsFile: - - # Type for the local MSP - by default it's of type bccsp - localMspType: bccsp - - # Used with Go profiling tools only in none production environment. In - # production, it should be disabled (eg enabled: false) - profile: - enabled: false - listenAddress: 0.0.0.0:6060 - - # Handlers defines custom handlers that can filter and mutate - # objects passing within the peer, such as: - # Auth filter - reject or forward proposals from clients - # Decorators - append or mutate the chaincode input passed to the chaincode - # Endorsers - Custom signing over proposal response payload and its mutation - # Valid handler definition contains: - # - A name which is a factory method name defined in - # core/handlers/library/library.go for statically compiled handlers - # - library path to shared object binary for pluggable filters - # Auth filters and decorators are chained and executed in the order that - # they are defined. For example: - # authFilters: - # - - # name: FilterOne - # library: /opt/lib/filter.so - # - - # name: FilterTwo - # decorators: - # - - # name: DecoratorOne - # - - # name: DecoratorTwo - # library: /opt/lib/decorator.so - # Endorsers are configured as a map that its keys are the endorsement system chaincodes that are being overridden. - # Below is an example that overrides the default ESCC and uses an endorsement plugin that has the same functionality - # as the default ESCC. - # If the 'library' property is missing, the name is used as the constructor method in the builtin library similar - # to auth filters and decorators. 
- # endorsers: - # escc: - # name: DefaultESCC - # library: /etc/hyperledger/fabric/plugin/escc.so - handlers: - authFilters: - - - name: DefaultAuth - - - name: ExpirationCheck # This filter checks identity x509 certificate expiration - decorators: - - - name: DefaultDecorator - endorsers: - escc: - name: DefaultEndorsement - library: - validators: - vscc: - name: DefaultValidation - library: - - # library: /etc/hyperledger/fabric/plugin/escc.so - # Number of goroutines that will execute transaction validation in parallel. - # By default, the peer chooses the number of CPUs on the machine. Set this - # variable to override that choice. - # NOTE: overriding this value might negatively influence the performance of - # the peer so please change this value only if you know what you're doing - validatorPoolSize: - - # The discovery service is used by clients to query information about peers, - # such as - which peers have joined a certain channel, what is the latest - # channel config, and most importantly - given a chaincode and a channel, - # what possible sets of peers satisfy the endorsement policy. - discovery: - enabled: true - # Whether the authentication cache is enabled or not. - authCacheEnabled: true - # The maximum size of the cache, after which a purge takes place - authCacheMaxSize: 1000 - # The proportion (0 to 1) of entries that remain in the cache after the cache is purged due to overpopulation - authCachePurgeRetentionRatio: 0.75 - # Whether to allow non-admins to perform non channel scoped queries. - # When this is false, it means that only peer admins can perform non channel scoped queries. - orgMembersAllowedAccess: false - - # Limits is used to configure some internal resource limits. - limits: - # Concurrency limits the number of concurrently running requests to a service on each peer. - # Currently this option is only applied to endorser service and deliver service. 
- # When the property is missing or the value is 0, the concurrency limit is disabled for the service. - concurrency: - # endorserService limits concurrent requests to endorser service that handles chaincode deployment, query and invocation, - # including both user chaincodes and system chaincodes. - endorserService: 2500 - # deliverService limits concurrent event listeners registered to deliver service for blocks and transaction events. - deliverService: 2500 - # gatewayService limits concurrent requests to gateway service that handles the submission and evaluation of transactions. - gatewayService: 500 - - # Since all nodes should be consistent it is recommended to keep - # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize - # Max message size in bytes GRPC server and client can receive - maxRecvMsgSize: 104857600 - # Max message size in bytes GRPC server and client can send - maxSendMsgSize: 104857600 - -############################################################################### -# -# VM section -# -############################################################################### -vm: - - # Endpoint of the vm management system. For docker can be one of the following in general - # unix:///var/run/docker.sock - # http://localhost:2375 - # https://localhost:2376 - # If you utilize external chaincode builders and don't need the default Docker chaincode builder, - # the endpoint should be unconfigured so that the peer's Docker health checker doesn't get registered. - endpoint: unix:///var/run/docker.sock - - # settings for docker vms - docker: - tls: - enabled: false - ca: - file: docker/ca.crt - cert: - file: docker/tls.crt - key: - file: docker/tls.key - - # Enables/disables the standard out/err from chaincode containers for - # debugging purposes - attachStdout: false - - # Parameters on creating docker container. 
- # Container may be efficiently created using ipam & dns-server for cluster - # NetworkMode - sets the networking mode for the container. Supported - # standard values are: `host`(default),`bridge`,`ipvlan`,`none`. - # Dns - a list of DNS servers for the container to use. - # Note: `Privileged` `Binds` `Links` and `PortBindings` properties of - # Docker Host Config are not supported and will not be used if set. - # LogConfig - sets the logging driver (Type) and related options - # (Config) for Docker. For more info, - # https://docs.docker.com/engine/admin/logging/overview/ - # Note: Set LogConfig using Environment Variables is not supported. - hostConfig: - NetworkMode: host - Dns: - # - 192.168.0.1 - LogConfig: - Type: json-file - Config: - max-size: "50m" - max-file: "5" - Memory: 2147483648 - -############################################################################### -# -# Chaincode section -# -############################################################################### -chaincode: - - # The id is used by the Chaincode stub to register the executing Chaincode - # ID with the Peer and is generally supplied through ENV variables - # the `path` form of ID is provided when installing the chaincode. - # The `name` is used for all other requests and can be any string. - id: - path: - name: - - # Generic builder environment, suitable for most chaincode types - builder: $(DOCKER_NS)/fabric-ccenv:$(TWO_DIGIT_VERSION) - - # Enables/disables force pulling of the base docker images (listed below) - # during user chaincode instantiation. - # Useful when using moving image tags (such as :latest) - pull: false - - golang: - # golang will never need more than baseos - runtime: $(DOCKER_NS)/fabric-baseos:$(TWO_DIGIT_VERSION) - - # whether or not golang chaincode should be linked dynamically - dynamicLink: false - - java: - # This is an image based on java:openjdk-8 with addition compiler - # tools added for java shim layer packaging. 
- # This image is packed with shim layer libraries that are necessary - # for Java chaincode runtime. - runtime: $(DOCKER_NS)/fabric-javaenv:$(TWO_DIGIT_VERSION) - - node: - # This is an image based on node:$(NODE_VER)-alpine - runtime: $(DOCKER_NS)/fabric-nodeenv:$(TWO_DIGIT_VERSION) - - # List of directories to treat as external builders and launchers for - # chaincode. The external builder detection processing will iterate over the - # builders in the order specified below. - # If you don't need to fallback to the default Docker builder, also unconfigure vm.endpoint above. - # To override this property via env variable use CORE_CHAINCODE_EXTERNALBUILDERS: [{name: x, path: dir1}, {name: y, path: dir2}] - # The path must be an absolute path. - externalBuilders: - - name: ccaas_builder - path: /opt/hyperledger/ccaas_builder - propagateEnvironment: - - CHAINCODE_AS_A_SERVICE_BUILDER_CONFIG - - - # The maximum duration to wait for the chaincode build and install process - # to complete. - installTimeout: 300s - - # Timeout duration for starting up a container and waiting for Register - # to come through. - startuptimeout: 300s - - # Timeout duration for Invoke and Init calls to prevent runaway. - # This timeout is used by all chaincodes in all the channels, including - # system chaincodes. - # Note that during Invoke, if the image is not available (e.g. being - # cleaned up when in development environment), the peer will automatically - # build the image, which might take more time. In production environment, - # the chaincode image is unlikely to be deleted, so the timeout could be - # reduced accordingly. - executetimeout: 30s - - # There are 2 modes: "dev" and "net". - # In dev mode, user runs the chaincode after starting peer from - # command line on local machine. - # In net mode, peer will run chaincode in a docker container. - mode: net - - # keepalive in seconds. 
In situations where the communication goes through a - # proxy that does not support keep-alive, this parameter will maintain connection - # between peer and chaincode. - # A value <= 0 turns keepalive off - keepalive: 0 - - # enabled system chaincodes - system: - _lifecycle: enable - cscc: enable - lscc: enable - qscc: enable - - # Logging section for the chaincode container - logging: - # Default level for all loggers within the chaincode container - level: info - # Override default level for the 'shim' logger - shim: warning - # Format for the chaincode container logs - format: '%{color}%{time:2006-01-02 15:04:05.000 MST} [%{module}] %{shortfunc} -> %{level:.4s} %{id:03x}%{color:reset} %{message}' - -############################################################################### -# -# Ledger section - ledger configuration encompasses both the blockchain -# and the state -# -############################################################################### -ledger: - - blockchain: - - state: - # stateDatabase - options are "goleveldb", "CouchDB" - # goleveldb - default state database stored in goleveldb. - # CouchDB - store state database in CouchDB - stateDatabase: goleveldb - # Limit on the number of records to return per query - totalQueryLimit: 100000 - couchDBConfig: - # It is recommended to run CouchDB on the same server as the peer, and - # not map the CouchDB container port to a server port in docker-compose. - # Otherwise proper security must be provided on the connection between - # CouchDB client (on the peer) and server. - couchDBAddress: 127.0.0.1:5984 - # This username must have read and write authority on CouchDB - username: - # The password is recommended to pass as an environment variable - # during start up (eg CORE_LEDGER_STATE_COUCHDBCONFIG_PASSWORD). - # If it is stored here, the file must be access control protected - # to prevent unintended users from discovering the password. 
- password: - # Number of retries for CouchDB errors - maxRetries: 3 - # Number of retries for CouchDB errors during peer startup. - # The delay between retries doubles for each attempt. - # Default of 10 retries results in 11 attempts over 2 minutes. - maxRetriesOnStartup: 10 - # CouchDB request timeout (unit: duration, e.g. 20s) - requestTimeout: 35s - # Limit on the number of records per each CouchDB query - # Note that chaincode queries are only bound by totalQueryLimit. - # Internally the chaincode may execute multiple CouchDB queries, - # each of size internalQueryLimit. - internalQueryLimit: 1000 - # Limit on the number of records per CouchDB bulk update batch - maxBatchUpdateSize: 1000 - # Create the _global_changes system database - # This is optional. Creating the global changes database will require - # additional system resources to track changes and maintain the database - createGlobalChangesDB: false - # CacheSize denotes the maximum mega bytes (MB) to be allocated for the in-memory state - # cache. Note that CacheSize needs to be a multiple of 32 MB. If it is not a multiple - # of 32 MB, the peer would round the size to the next multiple of 32 MB. - # To disable the cache, 0 MB needs to be assigned to the cacheSize. - cacheSize: 64 - - history: - # enableHistoryDatabase - options are true or false - # Indicates if the history of key updates should be stored. - # All history 'index' will be stored in goleveldb, regardless if using - # CouchDB or alternate database for the state. 
- enableHistoryDatabase: true - - pvtdataStore: - # the maximum db batch size for converting - # the ineligible missing data entries to eligible missing data entries - collElgProcMaxDbBatchSize: 5000 - # the minimum duration (in milliseconds) between writing - # two consecutive db batches for converting the ineligible missing data entries to eligible missing data entries - collElgProcDbBatchesInterval: 1000 - # The missing data entries are classified into two categories: - # (1) prioritized - # (2) deprioritized - # Initially, all missing data are in the prioritized list. When the - # reconciler is unable to fetch the missing data from other peers, - # the unreconciled missing data would be moved to the deprioritized list. - # The reconciler would retry deprioritized missing data after every - # deprioritizedDataReconcilerInterval (unit: minutes). Note that the - # interval needs to be greater than the reconcileSleepInterval - deprioritizedDataReconcilerInterval: 60m - # The frequency to purge private data (in number of blocks). - # Private data is purged from the peer's private data store based on - # the collection property blockToLive or an explicit chaincode call to PurgePrivateData(). - purgeInterval: 100 - # Whether to log private data keys purged from private data store (INFO level) when explicitly purged via chaincode - purgedKeyAuditLogging: true - - snapshots: - # Path on the file system where peer will store ledger snapshots - # The path must be an absolute path. 
- rootDir: /var/hyperledger/production/snapshots - -############################################################################### -# -# Operations section -# -############################################################################### -operations: - # host and port for the operations server - listenAddress: 127.0.0.1:9443 - - # TLS configuration for the operations endpoint - tls: - # TLS enabled - enabled: false - - # path to PEM encoded server certificate for the operations server - # The paths in this section may be relative to FABRIC_CFG_PATH or an absolute path. - cert: - file: - - # path to PEM encoded server key for the operations server - key: - file: - - # most operations service endpoints require client authentication when TLS - # is enabled. clientAuthRequired requires client certificate authentication - # at the TLS layer to access all resources. - clientAuthRequired: false - - # paths to PEM encoded ca certificates to trust for client authentication - clientRootCAs: - files: [] - -############################################################################### -# -# Metrics section -# -############################################################################### -metrics: - # metrics provider is one of statsd, prometheus, or disabled - provider: disabled - - # statsd configuration - statsd: - # network type: tcp or udp - network: udp - - # statsd server address - address: 127.0.0.1:8125 - - # the interval at which locally cached counters and gauges are pushed - # to statsd; timings are pushed immediately - writeInterval: 10s - - # prefix is prepended to all emitted statsd metrics - prefix: diff --git a/template/node/orderer.yaml.bak b/template/node/orderer.yaml.bak deleted file mode 100644 index 6c555f93e..000000000 --- a/template/node/orderer.yaml.bak +++ /dev/null @@ -1,427 +0,0 @@ -# Copyright IBM Corp. All Rights Reserved. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - ---- -################################################################################ -# -# Orderer Configuration -# -# - This controls the type and configuration of the orderer. -# -################################################################################ -General: - # Listen address: The IP on which to bind to listen. - ListenAddress: 127.0.0.1 - - # Listen port: The port on which to bind to listen. - ListenPort: 7050 - - # TLS: TLS settings for the GRPC server. - TLS: - # Require server-side TLS - Enabled: false - # PrivateKey governs the file location of the private key of the TLS certificate. - PrivateKey: tls/server.key - # Certificate governs the file location of the server TLS certificate. - Certificate: tls/server.crt - # RootCAs contains a list of additional root certificates used for verifying certificates - # of other orderer nodes during outbound connections. - # It is not required to be set, but can be used to augment the set of TLS CA certificates - # available from the MSPs of each channel’s configuration. - RootCAs: - - tls/ca.crt - # Require client certificates / mutual TLS for inbound connections. - ClientAuthRequired: false - # If mutual TLS is enabled, ClientRootCAs contains a list of additional root certificates - # used for verifying certificates of client connections. - # It is not required to be set, but can be used to augment the set of TLS CA certificates - # available from the MSPs of each channel’s configuration. - ClientRootCAs: - # Keepalive settings for the GRPC server. - Keepalive: - # ServerMinInterval is the minimum permitted time between client pings. - # If clients send pings more frequently, the server will - # disconnect them. - ServerMinInterval: 60s - # ServerInterval is the time between pings to clients. - ServerInterval: 7200s - # ServerTimeout is the duration the server waits for a response from - # a client before closing the connection. 
- ServerTimeout: 20s - - # Since all nodes should be consistent it is recommended to keep - # the default value of 100MB for MaxRecvMsgSize & MaxSendMsgSize - # Max message size in bytes the GRPC server and client can receive - MaxRecvMsgSize: 104857600 - # Max message size in bytes the GRPC server and client can send - MaxSendMsgSize: 104857600 - - # Cluster settings for ordering service nodes that communicate with other ordering service nodes - # such as Raft based ordering service. - Cluster: - # SendBufferSize is the maximum number of messages in the egress buffer. - # Consensus messages are dropped if the buffer is full, and transaction - # messages are waiting for space to be freed. - SendBufferSize: 100 - - # ClientCertificate governs the file location of the client TLS certificate - # used to establish mutual TLS connections with other ordering service nodes. - # If not set, the server General.TLS.Certificate is re-used. - ClientCertificate: - # ClientPrivateKey governs the file location of the private key of the client TLS certificate. - # If not set, the server General.TLS.PrivateKey is re-used. - ClientPrivateKey: - - # The below 4 properties should be either set together, or be unset together. - # If they are set, then the orderer node uses a separate listener for intra-cluster - # communication. If they are unset, then the general orderer listener is used. - # This is useful if you want to use a different TLS server certificates on the - # client-facing and the intra-cluster listeners. - - # ListenPort defines the port on which the cluster listens to connections. - ListenPort: - # ListenAddress defines the IP on which to listen to intra-cluster communication. - ListenAddress: - # ServerCertificate defines the file location of the server TLS certificate used for intra-cluster - # communication. - ServerCertificate: - # ServerPrivateKey defines the file location of the private key of the TLS certificate. 
- ServerPrivateKey: - - # Bootstrap method: The method by which to obtain the bootstrap block - # system channel is specified. The option can be one of: - # "file" - path to a file containing the genesis block or config block of system channel - # "none" - allows an orderer to start without a system channel configuration - BootstrapMethod: file - - # Bootstrap file: The file containing the bootstrap block to use when - # initializing the orderer system channel and BootstrapMethod is set to - # "file". The bootstrap file can be the genesis block, and it can also be - # a config block for late bootstrap of some consensus methods like Raft. - # Generate a genesis block by updating $FABRIC_CFG_PATH/configtx.yaml and - # using configtxgen command with "-outputBlock" option. - # Defaults to file "genesisblock" (in $FABRIC_CFG_PATH directory) if not specified. - BootstrapFile: - - # LocalMSPDir is where to find the private crypto material needed by the - # orderer. It is set relative here as a default for dev environments but - # should be changed to the real location in production. - LocalMSPDir: msp - - # LocalMSPID is the identity to register the local MSP material with the MSP - # manager. IMPORTANT: The local MSP ID of an orderer needs to match the MSP - # ID of one of the organizations defined in the orderer system channel's - # /Channel/Orderer configuration. The sample organization defined in the - # sample configuration provided has an MSP ID of "SampleOrg". - LocalMSPID: SampleOrg - - # Enable an HTTP service for Go "pprof" profiling as documented at: - # https://golang.org/pkg/net/http/pprof - Profile: - Enabled: false - Address: 0.0.0.0:6060 - - # BCCSP configures the blockchain crypto service providers. - BCCSP: - # Default specifies the preferred blockchain crypto service provider - # to use. If the preferred provider is not available, the software - # based provider ("SW") will be used. 
- # Valid providers are: - # - SW: a software based crypto provider - # - PKCS11: a CA hardware security module crypto provider. - Default: SW - - # SW configures the software based blockchain crypto provider. - SW: - # TODO: The default Hash and Security level needs refactoring to be - # fully configurable. Changing these defaults requires coordination - # SHA2 is hardcoded in several places, not only BCCSP - Hash: SHA2 - Security: 256 - # Location of key store. If this is unset, a location will be - # chosen using: 'LocalMSPDir'/keystore - FileKeyStore: - KeyStore: - - # Settings for the PKCS#11 crypto provider (i.e. when DEFAULT: PKCS11) - PKCS11: - # Location of the PKCS11 module library - Library: - # Token Label - Label: - # User PIN - Pin: - Hash: - Security: - FileKeyStore: - KeyStore: - - # Authentication contains configuration parameters related to authenticating - # client messages - Authentication: - # the acceptable difference between the current server time and the - # client's time as specified in a client request message - TimeWindow: 15m - - -################################################################################ -# -# SECTION: File Ledger -# -# - This section applies to the configuration of the file ledger. -# -################################################################################ -FileLedger: - - # Location: The directory to store the blocks in. - Location: /var/hyperledger/production/orderer - -################################################################################ -# -# SECTION: Kafka -# -# - This section applies to the configuration of the Kafka-based orderer, and -# its interaction with the Kafka cluster. -# -################################################################################ -Kafka: - - # Retry: What do if a connection to the Kafka cluster cannot be established, - # or if a metadata request to the Kafka cluster needs to be repeated. 
- Retry: - # When a new channel is created, or when an existing channel is reloaded - # (in case of a just-restarted orderer), the orderer interacts with the - # Kafka cluster in the following ways: - # 1. It creates a Kafka producer (writer) for the Kafka partition that - # corresponds to the channel. - # 2. It uses that producer to post a no-op CONNECT message to that - # partition - # 3. It creates a Kafka consumer (reader) for that partition. - # If any of these steps fail, they will be re-attempted every - # for a total of , and then every - # for a total of until they succeed. - # Note that the orderer will be unable to write to or read from a - # channel until all of the steps above have been completed successfully. - ShortInterval: 5s - ShortTotal: 10m - LongInterval: 5m - LongTotal: 12h - # Affects the socket timeouts when waiting for an initial connection, a - # response, or a transmission. See Config.Net for more info: - # https://godoc.org/github.com/Shopify/sarama#Config - NetworkTimeouts: - DialTimeout: 10s - ReadTimeout: 10s - WriteTimeout: 10s - # Affects the metadata requests when the Kafka cluster is in the middle - # of a leader election.See Config.Metadata for more info: - # https://godoc.org/github.com/Shopify/sarama#Config - Metadata: - RetryBackoff: 250ms - RetryMax: 3 - # What to do if posting a message to the Kafka cluster fails. See - # Config.Producer for more info: - # https://godoc.org/github.com/Shopify/sarama#Config - Producer: - RetryBackoff: 100ms - RetryMax: 3 - # What to do if reading from the Kafka cluster fails. See - # Config.Consumer for more info: - # https://godoc.org/github.com/Shopify/sarama#Config - Consumer: - RetryBackoff: 2s - # Settings to use when creating Kafka topics. Only applies when - # Kafka.Version is v0.10.1.0 or higher - Topic: - # The number of Kafka brokers across which to replicate the topic - ReplicationFactor: 3 - # Verbose: Enable logging for interactions with the Kafka cluster. 
- Verbose: false - - # TLS: TLS settings for the orderer's connection to the Kafka cluster. - TLS: - - # Enabled: Use TLS when connecting to the Kafka cluster. - Enabled: false - - # PrivateKey: PEM-encoded private key the orderer will use for - # authentication. - PrivateKey: - # As an alternative to specifying the PrivateKey here, uncomment the - # following "File" key and specify the file name from which to load the - # value of PrivateKey. - #File: path/to/PrivateKey - - # Certificate: PEM-encoded signed public key certificate the orderer will - # use for authentication. - Certificate: - # As an alternative to specifying the Certificate here, uncomment the - # following "File" key and specify the file name from which to load the - # value of Certificate. - #File: path/to/Certificate - - # RootCAs: PEM-encoded trusted root certificates used to validate - # certificates from the Kafka cluster. - RootCAs: - # As an alternative to specifying the RootCAs here, uncomment the - # following "File" key and specify the file name from which to load the - # value of RootCAs. 
- #File: path/to/RootCAs - - # SASLPlain: Settings for using SASL/PLAIN authentication with Kafka brokers - SASLPlain: - # Enabled: Use SASL/PLAIN to authenticate with Kafka brokers - Enabled: false - # User: Required when Enabled is set to true - User: - # Password: Required when Enabled is set to true - Password: - - # Kafka protocol version used to communicate with the Kafka cluster brokers - # (defaults to 0.10.2.0 if not specified) - Version: - -################################################################################ -# -# Debug Configuration -# -# - This controls the debugging options for the orderer -# -################################################################################ -Debug: - - # BroadcastTraceDir when set will cause each request to the Broadcast service - # for this orderer to be written to a file in this directory - BroadcastTraceDir: - - # DeliverTraceDir when set will cause each request to the Deliver service - # for this orderer to be written to a file in this directory - DeliverTraceDir: - -################################################################################ -# -# Operations Configuration -# -# - This configures the operations server endpoint for the orderer -# -################################################################################ -Operations: - # host and port for the operations server - ListenAddress: 127.0.0.1:8443 - - # TLS configuration for the operations endpoint - TLS: - # TLS enabled - Enabled: false - - # Certificate is the location of the PEM encoded TLS certificate - Certificate: - - # PrivateKey points to the location of the PEM-encoded key - PrivateKey: - - # Most operations service endpoints require client authentication when TLS - # is enabled. ClientAuthRequired requires client certificate authentication - # at the TLS layer to access all resources. 
- ClientAuthRequired: false - - # Paths to PEM encoded ca certificates to trust for client authentication - ClientRootCAs: [] - -################################################################################ -# -# Metrics Configuration -# -# - This configures metrics collection for the orderer -# -################################################################################ -Metrics: - # The metrics provider is one of statsd, prometheus, or disabled - Provider: disabled - - # The statsd configuration - Statsd: - # network type: tcp or udp - Network: udp - - # the statsd server address - Address: 127.0.0.1:8125 - - # The interval at which locally cached counters and gauges are pushed - # to statsd; timings are pushed immediately - WriteInterval: 30s - - # The prefix is prepended to all emitted statsd metrics - Prefix: - -################################################################################ -# -# Admin Configuration -# -# - This configures the admin server endpoint for the orderer -# -################################################################################ -Admin: - # host and port for the admin server - ListenAddress: 127.0.0.1:9443 - - # TLS configuration for the admin endpoint - TLS: - # TLS enabled - Enabled: false - - # Certificate is the location of the PEM encoded TLS certificate - Certificate: - - # PrivateKey points to the location of the PEM-encoded key - PrivateKey: - - # Most admin service endpoints require client authentication when TLS - # is enabled. ClientAuthRequired requires client certificate authentication - # at the TLS layer to access all resources. - # - # NOTE: When TLS is enabled, the admin endpoint requires mutual TLS. The - # orderer will panic on startup if this value is set to false. 
- ClientAuthRequired: true - - # Paths to PEM encoded ca certificates to trust for client authentication - ClientRootCAs: [] - -################################################################################ -# -# Channel participation API Configuration -# -# - This provides the channel participation API configuration for the orderer. -# - Channel participation uses the ListenAddress and TLS settings of the Admin -# service. -# -################################################################################ -ChannelParticipation: - # Channel participation API is enabled. - Enabled: false - - # The maximum size of the request body when joining a channel. - MaxRequestBodySize: 1 MB - - -################################################################################ -# -# Consensus Configuration -# -# - This section contains config options for a consensus plugin. It is opaque -# to orderer, and completely up to consensus implementation to make use of. -# -################################################################################ -Consensus: - # The allowed key-value pairs here depend on consensus plugin. For etcd/raft, - # we use following options: - - # WALDir specifies the location at which Write Ahead Logs for etcd/raft are - # stored. Each channel will have its own subdir named after channel ID. - WALDir: /var/hyperledger/production/orderer/etcdraft/wal - - # SnapDir specifies the location at which snapshots for etcd/raft are - # stored. Each channel will have its own subdir named after channel ID. 
- SnapDir: /var/hyperledger/production/orderer/etcdraft/snapshot From d4d56427bd0953da24f70fa1370beae14ca487c9 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 11:05:25 +0800 Subject: [PATCH 13/67] Remove redundant mkdir Signed-off-by: dodo920306 --- build_image/docker/common/dashboard/Dockerfile.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_image/docker/common/dashboard/Dockerfile.in b/build_image/docker/common/dashboard/Dockerfile.in index e9e9dabe1..b32fd5445 100644 --- a/build_image/docker/common/dashboard/Dockerfile.in +++ b/build_image/docker/common/dashboard/Dockerfile.in @@ -2,7 +2,7 @@ FROM node:20.15 WORKDIR /usr/src/app/ USER root -RUN mkdir -p /usr/src/app && cd /usr/src/app +RUN cd /usr/src/app COPY src/dashboard /usr/src/app RUN export NODE_OPTIONS=--openssl-legacy-provider && yarn --network-timeout 600000 && yarn run build From d1edf7608fdd361db332d86fec9ec79056ee4d63 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 11:05:36 +0800 Subject: [PATCH 14/67] Fix missing 'l' Signed-off-by: dodo920306 --- build_image/docker/common/dashboard/default.conf.tmpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build_image/docker/common/dashboard/default.conf.tmpl b/build_image/docker/common/dashboard/default.conf.tmpl index 99f975e74..5c4be36e6 100644 --- a/build_image/docker/common/dashboard/default.conf.tmpl +++ b/build_image/docker/common/dashboard/default.conf.tmpl @@ -13,7 +13,7 @@ server { root /usr/share/nginx/html; location / { - index index.html index.htm; + index index.html index.html; try_files $uri $uri/ /index.html; } From 12f2400e2bdfdbf21eefe626e36873cada7e9c60 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 11:05:52 +0800 Subject: [PATCH 15/67] Fix invalid usage of cross env Signed-off-by: dodo920306 --- src/dashboard/package.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dashboard/package.json 
b/src/dashboard/package.json index cb2c067db..1a4fabfd8 100644 --- a/src/dashboard/package.json +++ b/src/dashboard/package.json @@ -24,8 +24,8 @@ "lint:style": "stylelint 'src/**/*.less' --syntax less", "prettier": "node ./scripts/prettier.js", "site": "umi build && yarn run functions:build", - "start": "cross-env umi dev PORT=8001", - "start:no-mock": "cross-env MOCK=none umi dev PORT=8002", + "start": "cross-env PORT=8001 umi dev", + "start:no-mock": "cross-env PORT=8002 MOCK=none umi dev", "test": "umi test", "test:all": "node ./tests/run-tests.js", "test:component": "umi test ./src/components", From 1c5ca60984f6ffac37ef06ea2590eae58255dccb Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 11:06:13 +0800 Subject: [PATCH 16/67] Rename 'fakeAccountLogin' to 'login' Signed-off-by: dodo920306 --- src/dashboard/src/services/api.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dashboard/src/services/api.js b/src/dashboard/src/services/api.js index 75e412bae..334da5198 100644 --- a/src/dashboard/src/services/api.js +++ b/src/dashboard/src/services/api.js @@ -2,7 +2,7 @@ import request from '@/utils/request'; // eslint-disable-next-line import/prefer-default-export -export async function fakeAccountLogin(params) { +export async function login(params) { return request('/api/v1/login', { method: 'POST', data: params, From 6007faeb0a53280ca1ba095206f8d138ce474340 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 11:06:36 +0800 Subject: [PATCH 17/67] Rename 'fakeAccountLogin' to 'login' Signed-off-by: dodo920306 --- src/dashboard/src/models/login.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dashboard/src/models/login.js b/src/dashboard/src/models/login.js index f6fd66d6b..dfe8d9749 100644 --- a/src/dashboard/src/models/login.js +++ b/src/dashboard/src/models/login.js @@ -1,6 +1,6 @@ import { history } from 'umi'; import { stringify } from 'qs'; -import { fakeAccountLogin, register } from 
'@/services/api'; +import { login, register } from '@/services/api'; import { setAuthority } from '@/utils/authority'; import { getPageQuery } from '@/utils/utils'; import { reloadAuthorized } from '@/utils/Authorized'; @@ -18,7 +18,7 @@ export default { effects: { *login({ payload }, { call, put }) { - const response = yield call(fakeAccountLogin, payload); + const response = yield call(login, payload); // Login successfully if (response.data.token) { const { user, token } = response.data; From dc5fcaa02edb8f1616eec5cc61d7620955f7d430 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 13:56:32 +0800 Subject: [PATCH 18/67] Fix deprecated AntD Menu props Signed-off-by: dodo920306 --- .../components/GlobalHeader/RightContent.js | 26 +- .../src/components/GlobalHeader/index.less | 17 +- .../src/components/HeaderDropdown/index.js | 4 +- .../src/components/SelectLang/index.js | 31 ++- .../src/components/SelectLang/index.less | 9 - src/dashboard/src/pages/User/Login.js | 245 +++++++++--------- 6 files changed, 169 insertions(+), 163 deletions(-) diff --git a/src/dashboard/src/components/GlobalHeader/RightContent.js b/src/dashboard/src/components/GlobalHeader/RightContent.js index cde1acd56..0ae0c0196 100644 --- a/src/dashboard/src/components/GlobalHeader/RightContent.js +++ b/src/dashboard/src/components/GlobalHeader/RightContent.js @@ -9,28 +9,28 @@ import styles from './index.less'; class GlobalHeaderRight extends PureComponent { render() { const { currentUser, onMenuClick, intl } = this.props; - const menu = ( - - - + const menuItems = [{ + key: "logout", + icon: , + label: ( + {intl.formatMessage({ id: 'menu.account.logout', defaultMessage: 'logout', })} - - - ); + + ), + onClick: onMenuClick + }]; const className = styles.right; - // if (theme === 'dark') { - // className = `${styles.right} ${styles.dark}`; - // } + return (
- {currentUser.username ? ( - + {currentUser.id ? ( + - {currentUser.username} + {currentUser.email} ) : ( diff --git a/src/dashboard/src/components/GlobalHeader/index.less b/src/dashboard/src/components/GlobalHeader/index.less index bac55dc22..3679c4fcd 100644 --- a/src/dashboard/src/components/GlobalHeader/index.less +++ b/src/dashboard/src/components/GlobalHeader/index.less @@ -24,13 +24,16 @@ } } -.menu { - :global(.anticon) { - margin-right: 8px; - } - :global(.ant-dropdown-menu-item) { - min-width: 160px; - } +:global(.anticon) { + margin-right: 8px; +} + +:global(.ant-dropdown-menu) { + width: 120px; +} + +:global(.ant-dropdown-menu-item) { + width: 120px; } .trigger { diff --git a/src/dashboard/src/components/HeaderDropdown/index.js b/src/dashboard/src/components/HeaderDropdown/index.js index a19c471ac..b81002064 100644 --- a/src/dashboard/src/components/HeaderDropdown/index.js +++ b/src/dashboard/src/components/HeaderDropdown/index.js @@ -5,9 +5,9 @@ import styles from './index.less'; export default class HeaderDropdown extends PureComponent { render() { - const { overlayClassName, ...props } = this.props; + const { className, ...props } = this.props; return ( - + ); } } diff --git a/src/dashboard/src/components/SelectLang/index.js b/src/dashboard/src/components/SelectLang/index.js index 11463fd40..c96f8cdc7 100644 --- a/src/dashboard/src/components/SelectLang/index.js +++ b/src/dashboard/src/components/SelectLang/index.js @@ -1,6 +1,5 @@ import React, { PureComponent } from 'react'; import { injectIntl, setLocale, getLocale } from 'umi'; -import { Menu } from 'antd'; import { GlobalOutlined } from '@ant-design/icons'; import classNames from 'classnames'; import HeaderDropdown from '../HeaderDropdown'; @@ -14,7 +13,6 @@ class SelectLang extends PureComponent { render() { const { className, intl } = this.props; - const selectedLang = getLocale(); const locales = ['zh-CN', 'en-US']; const languageLabels = { 'zh-CN': '简体中文', @@ -24,22 +22,23 @@ class 
SelectLang extends PureComponent { 'zh-CN': '🇨🇳', 'en-US': '🇬🇧', }; - const langMenu = ( - - {locales.map(locale => ( - - - {languageIcons[locale]} - {' '} - {languageLabels[locale]} - - ))} - - ); + const langMenuItems = locales.map(locale => ({ + key: locale, + label: ( + + {languageIcons[locale]} {languageLabels[locale]} + + ), + onClick: this.changeLang + })); return ( - + - + + {intl.formatMessage({ id: 'navBar.lang' })} ); diff --git a/src/dashboard/src/components/SelectLang/index.less b/src/dashboard/src/components/SelectLang/index.less index 9f41ade9a..9894b3098 100644 --- a/src/dashboard/src/components/SelectLang/index.less +++ b/src/dashboard/src/components/SelectLang/index.less @@ -1,14 +1,5 @@ @import '~antd/lib/style/themes/default.less'; -.menu { - :global(.anticon) { - margin-right: 8px; - } - :global(.ant-dropdown-menu-item) { - min-width: 160px; - } -} - .dropDown { line-height: @layout-header-height; vertical-align: top; diff --git a/src/dashboard/src/pages/User/Login.js b/src/dashboard/src/pages/User/Login.js index e7a4412d9..70b5d6c0a 100644 --- a/src/dashboard/src/pages/User/Login.js +++ b/src/dashboard/src/pages/User/Login.js @@ -68,133 +68,146 @@ class LoginPage extends Component { const { login, submitting, intl, registering } = this.props; const { type, autoLogin } = this.state; const { success, registerMsg } = login.register; - return ( -
- - - - {login.status === 'error' && - login.type === 'account' && - !submitting && - this.renderMessage( - intl.formatMessage({ id: 'app.login.message-invalid-credentials' }) - )} - - -
- - {intl.formatMessage({ - id: 'app.login.remember-me', - })} - -
- + + const tabItems = [ + { + key: '1', + label: intl.formatMessage({ id: 'app.login.login' }), + children: ( + + {login.status === 'error' && + login.type === 'account' && + !submitting && + this.renderMessage( + intl.formatMessage({ id: 'app.login.message-invalid-credentials' }) + )} + + +
+ {intl.formatMessage({ - id: 'app.login.login', + id: 'app.login.remember-me', })} - - - - - - {!registering && - registerMsg !== '' && - this.renderMessage({ type: success ? 'success' : 'error', message: registerMsg })} - - - - - +
+ + {intl.formatMessage({ + id: 'app.login.login', + })} + +
+ ) + }, + { + key: '2', + label: intl.formatMessage({ id: 'app.register.register' }), + children: ( + + {!registering && + registerMsg !== '' && + this.renderMessage({ type: success ? 'success' : 'error', message: registerMsg })} + + + - + + + ({ + validator(role, value) { + if (value !== getFieldValue('password')) { + return Promise.reject( + intl.formatMessage({ id: 'validation.password.twice' }) + ); + } + return Promise.resolve(); }, - ]} - /> - ({ - validator(role, value) { - if (value !== getFieldValue('password')) { - return Promise.reject( - intl.formatMessage({ id: 'validation.password.twice' }) - ); - } - return Promise.resolve(); - }, - }), - ]} - /> - - {intl.formatMessage({ - id: 'app.register.register', - })} - - -
+ }), + ]} + /> + + {intl.formatMessage({ + id: 'app.register.register', + })} + + + ) + } + ] + + + return ( +
+
); From 159aab4e02e91e7238a227e73c9f46e21605bcf6 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 13:57:33 +0800 Subject: [PATCH 19/67] Fix isLogin logic Use User ID instead of Username to judge isLogin. Signed-off-by: dodo920306 --- src/dashboard/src/layouts/SecurityLayout.jsx | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/dashboard/src/layouts/SecurityLayout.jsx b/src/dashboard/src/layouts/SecurityLayout.jsx index 7676f4ff5..1d9998cc4 100644 --- a/src/dashboard/src/layouts/SecurityLayout.jsx +++ b/src/dashboard/src/layouts/SecurityLayout.jsx @@ -23,14 +23,11 @@ class SecurityLayout extends React.Component { render() { const { isReady } = this.state; - const { children, loading, currentUser } = this.props; // You can replace it to your authentication rule (such as check token exists) - // 你可以把它替换成你自己的登录认证规则(比如判断 token 是否存在) - - const isLogin = currentUser && currentUser.username; + const { children, loading, currentUser } = this.props; + const isLogin = currentUser && currentUser.id; const queryString = stringify({ redirect: window.location.href, }); - if ((!isLogin && loading) || !isReady) { return ; } From 02c29793d488993ac43633f4ab6ce3bb8c5acf36 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 14:14:29 +0800 Subject: [PATCH 20/67] Fix failed to acquire organizations Signed-off-by: dodo920306 --- src/api-engine/organization/views.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/api-engine/organization/views.py b/src/api-engine/organization/views.py index 2f2cbf8ed..b538aff59 100644 --- a/src/api-engine/organization/views.py +++ b/src/api-engine/organization/views.py @@ -31,15 +31,19 @@ class OrganizationViewSet(viewsets.ViewSet): def list(self, request): serializer = PageQuerySerializer(data=request.GET) p = serializer.get_paginator(Organization.objects.all()) - response = OrganizationList( - data={ - "total": p.count, - "data": 
OrganizationResponse(p.page(serializer.data.page).object_list, many=True).data - } - ) - response.is_valid(raise_exception=True) + response = OrganizationList({ + "total": p.count, + "data": OrganizationResponse(p.page(serializer.data["page"]).object_list, many=True).data + }) return Response( - ok(response.data), status=status.HTTP_200_OK + status=status.HTTP_200_OK, + data=ok(OrganizationList({ + "total": p.count, + "data": OrganizationResponse( + p.page(serializer.data["page"]).object_list, + many=True + ).data + }).data) ) @swagger_auto_schema( From 93bd4244b0e16f3bd7b8da9bf2fcea7f77461862 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 14:23:49 +0800 Subject: [PATCH 21/67] Remove New Organization button Fix #734. Signed-off-by: dodo920306 --- src/dashboard/src/pages/Organization/Organization.js | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/dashboard/src/pages/Organization/Organization.js b/src/dashboard/src/pages/Organization/Organization.js index 3240e4c4f..c47f19cee 100644 --- a/src/dashboard/src/pages/Organization/Organization.js +++ b/src/dashboard/src/pages/Organization/Organization.js @@ -389,12 +389,6 @@ class Organization extends PureComponent { >
-
- -
Date: Mon, 29 Sep 2025 14:25:08 +0800 Subject: [PATCH 22/67] Remove org operations temporarily The update/delete organizations feature isn't ready yet, so they're disabled in this commit but will be added back in the future. Signed-off-by: dodo920306 --- src/dashboard/src/pages/Organization/Organization.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/dashboard/src/pages/Organization/Organization.js b/src/dashboard/src/pages/Organization/Organization.js index c47f19cee..4938ce091 100644 --- a/src/dashboard/src/pages/Organization/Organization.js +++ b/src/dashboard/src/pages/Organization/Organization.js @@ -355,13 +355,13 @@ class Organization extends PureComponent { }), render: (text, record) => ( - this.showUpdate(record)}> + {/* this.showUpdate(record)}> {intl.formatMessage({ id: 'form.menu.item.update', defaultMessage: 'Update' })} - + */} - this.handleDelete(record)}> + {/* this.handleDelete(record)}> {intl.formatMessage({ id: 'form.menu.item.delete', defaultMessage: 'Delete' })} - + */} ), }, From cbe99b97b37d3bf474918e8e9f2a4cfd0025f857 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 14:28:52 +0800 Subject: [PATCH 23/67] Disable agents for now Agents "may" be deleted in the future. 
Signed-off-by: dodo920306 --- src/dashboard/config/router.config.js | 36 +++++++++++++-------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/dashboard/config/router.config.js b/src/dashboard/config/router.config.js index 952e7d49f..af42b8a3e 100755 --- a/src/dashboard/config/router.config.js +++ b/src/dashboard/config/router.config.js @@ -38,24 +38,24 @@ export default [ icon: 'team', component: './Organization/Organization', }, - { - path: '/agent', - name: 'agent', - icon: 'agent', - component: './Agent/Agent', - }, - { - path: '/agent/newAgent', - name: 'newAgent', - component: './Agent/newAgent', - hideInMenu: true, - }, - { - path: '/agent/editAgent', - name: 'editAgent', - component: './Agent/newAgent', - hideInMenu: true, - }, + // { + // path: '/agent', + // name: 'agent', + // icon: 'agent', + // component: './Agent/Agent', + // }, + // { + // path: '/agent/newAgent', + // name: 'newAgent', + // component: './Agent/newAgent', + // hideInMenu: true, + // }, + // { + // path: '/agent/editAgent', + // name: 'editAgent', + // component: './Agent/newAgent', + // hideInMenu: true, + // }, { path: '/node', name: 'node', From 4902ff757b3ea52713b725ca6b1106f6522e8825 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 14:29:07 +0800 Subject: [PATCH 24/67] Fix invalid path of nodes Signed-off-by: dodo920306 --- src/api-engine/api_engine/urls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api-engine/api_engine/urls.py b/src/api-engine/api_engine/urls.py index 4938165ad..48b64f882 100644 --- a/src/api-engine/api_engine/urls.py +++ b/src/api-engine/api_engine/urls.py @@ -52,7 +52,7 @@ router = DefaultRouter(trailing_slash=False) router.register("organizations", OrganizationViewSet, basename="organization") router.register("users", UserViewSet, basename="user") -router.register("node", NodeViewSet, basename="node") +router.register("nodes", NodeViewSet, basename="node") router.register("register", 
RegisterViewSet, basename="register") router.register("channels", ChannelViewSet, basename="channel") router.register("chaincodes", ChaincodeViewSet, basename="chaincode") From 1d4a9034a9b7582f5b7b7a99a606f1aedb6fb3f6 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 14:42:24 +0800 Subject: [PATCH 25/67] Fix failed to get nodes Signed-off-by: dodo920306 --- src/dashboard/src/pages/Node/index.js | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/src/dashboard/src/pages/Node/index.js b/src/dashboard/src/pages/Node/index.js index 7c01df570..8734f0d6b 100644 --- a/src/dashboard/src/pages/Node/index.js +++ b/src/dashboard/src/pages/Node/index.js @@ -27,14 +27,6 @@ import styles from '../styles.less'; const FormItem = Form.Item; const { Option } = Select; -// function str2bytes (str) { -// var bytes = new Uint8Array(str.length); -// for (var i=0; i { const { registerUserFormVisible, @@ -630,7 +622,7 @@ class Index extends PureComponent { const menu = record => ( - {record.type === 'ca' && ( + {record.type.toLowerCase() === 'ca' && ( this.handleRegisterUser(record)}> {intl.formatMessage({ @@ -640,14 +632,14 @@ class Index extends PureComponent { )} - {(record.type === 'peer' || record.type === 'orderer') && ( + {(record.type.toLowerCase() === 'peer' || record.type.toLowerCase() === 'orderer') && ( this.handleDownloadConfig(record)}> {intl.formatMessage({ id: 'form.menu.item.download', defaultMessage: 'Download' })} )} - {(record.type === 'peer' || record.type === 'orderer') && ( + {(record.type.toLowerCase() === 'peer' || record.type.toLowerCase() === 'orderer') && ( )} - {record.type === 'peer' && ( + {record.type.toLowerCase() === 'peer' && ( text.toLowerCase() }, { title: intl.formatMessage({ @@ -749,7 +742,7 @@ class Index extends PureComponent { defaultMessage: 'Status', }), dataIndex: 'status', - render: text => , + render: text => , }, { title: intl.formatMessage({ @@ -758,7 +751,7 @@ class Index extends 
PureComponent { }), render: (text, record) => ( - {record.status === 'running' && ( + {record.status.toLowerCase() === 'running' && ( this.operationForNode('stop', record)}> {intl.formatMessage({ id: 'app.node.table.operation.stop', @@ -766,7 +759,7 @@ class Index extends PureComponent { })} )} - {record.status === 'stopped' && ( + {record.status.toLowerCase() === 'stopped' && ( this.operationForNode('start', record)}> {intl.formatMessage({ From d49e811e3d6a68ec7cd355e5e7b7e5f527d189dd Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 14:44:20 +0800 Subject: [PATCH 26/67] Disable network pages "Network" may be deleted in the future, so here the related pages are disabled. Signed-off-by: dodo920306 --- src/dashboard/config/router.config.js | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/dashboard/config/router.config.js b/src/dashboard/config/router.config.js index af42b8a3e..f404b7bc7 100755 --- a/src/dashboard/config/router.config.js +++ b/src/dashboard/config/router.config.js @@ -84,18 +84,18 @@ export default [ }, ], }, - { - path: '/network', - name: 'network', - icon: 'network', - component: './Network/Network', - }, - { - path: '/network/newNetwork', - name: 'newNetwork', - component: './Network/newNetwork', - hideInMenu: true, - }, + // { + // path: '/network', + // name: 'network', + // icon: 'network', + // component: './Network/Network', + // }, + // { + // path: '/network/newNetwork', + // name: 'newNetwork', + // component: './Network/newNetwork', + // hideInMenu: true, + // }, { path: '/channel', name: 'channel', From 4b4450e81a1db932b640a29e1bcf4ebc5ac41716 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 15:07:04 +0800 Subject: [PATCH 27/67] Fix failed to show channels Signed-off-by: dodo920306 --- src/dashboard/src/models/channel.js | 4 ++-- src/dashboard/src/pages/Channel/Channel.js | 25 +++++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff 
--git a/src/dashboard/src/models/channel.js b/src/dashboard/src/models/channel.js index 8de345097..a90fd46dc 100644 --- a/src/dashboard/src/models/channel.js +++ b/src/dashboard/src/models/channel.js @@ -20,14 +20,14 @@ export default { const pageSize = payload ? payload.per_page || pagination.pageSize : pagination.pageSize; const current = payload ? payload.page || pagination.current : pagination.current; - pagination.total = response.data.total; + pagination.total = response.total; pagination.pageSize = pageSize; pagination.current = current; yield put({ type: 'save', payload: { pagination, - channels: response.data.data, + channels: response.data, }, }); }, diff --git a/src/dashboard/src/pages/Channel/Channel.js b/src/dashboard/src/pages/Channel/Channel.js index 6886d55ab..9381b3564 100644 --- a/src/dashboard/src/pages/Channel/Channel.js +++ b/src/dashboard/src/pages/Channel/Channel.js @@ -372,10 +372,19 @@ class Channel extends PureComponent { } fetchChannels = () => { - const { dispatch } = this.props; + const { + dispatch, + channel: { pagination }, + } = this.props; + const { formValues } = this.state; dispatch({ type: 'channel/listChannel', + payload: { + ...formValues, + per_page: pagination.pageSize, + page: pagination.current, + }, }); dispatch({ @@ -526,13 +535,13 @@ class Channel extends PureComponent { }), dataIndex: 'name', }, - { - title: intl.formatMessage({ - id: 'app.channel.table.header.network', - defaultMessage: 'Network', - }), - render: (text, record) => record.network.name, - }, + // { + // title: intl.formatMessage({ + // id: 'app.channel.table.header.network', + // defaultMessage: 'Network', + // }), + // render: (text, record) => record.network.name, + // }, { title: intl.formatMessage({ id: 'form.table.header.operation', From bce665a56c33895157a858f5dda658680d68154d Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 15:08:13 +0800 Subject: [PATCH 28/67] Disable channel operations They will be opened once they're ready. 
Signed-off-by: dodo920306 --- src/dashboard/src/pages/Channel/Channel.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/dashboard/src/pages/Channel/Channel.js b/src/dashboard/src/pages/Channel/Channel.js index 9381b3564..c0ac00140 100644 --- a/src/dashboard/src/pages/Channel/Channel.js +++ b/src/dashboard/src/pages/Channel/Channel.js @@ -550,13 +550,13 @@ class Channel extends PureComponent { // eslint-disable-next-line no-unused-vars render: (text, record) => ( - this.onUpdateChannel(record)}> + {/* this.onUpdateChannel(record)}> {intl.formatMessage({ id: 'form.menu.item.update', defaultMessage: 'Update' })} this.handleDownloadConfig(record)}> {intl.formatMessage({ id: 'form.menu.item.download', defaultMessage: 'Download' })} - + */} ), }, From 0a8cea37f5e7092b2edc92a980c76bb7d2e62ca1 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 29 Sep 2025 15:21:28 +0800 Subject: [PATCH 29/67] Fix failed to get channels for chaincodes Signed-off-by: dodo920306 --- src/dashboard/src/pages/ChainCode/forms/ApproveForm.js | 6 +++--- src/dashboard/src/pages/ChainCode/forms/CommitForm.js | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/dashboard/src/pages/ChainCode/forms/ApproveForm.js b/src/dashboard/src/pages/ChainCode/forms/ApproveForm.js index 3870ac6a8..1121eb8e5 100644 --- a/src/dashboard/src/pages/ChainCode/forms/ApproveForm.js +++ b/src/dashboard/src/pages/ChainCode/forms/ApproveForm.js @@ -22,9 +22,9 @@ const ApproveForm = props => { useEffect(() => { async function fetchData() { const response = await listChannel(); - const newChannels = Object.keys(response.data.data).map(item => ({ - label: response.data.data[item].name, - value: response.data.data[item].name, + const newChannels = Object.keys(response.data).map(item => ({ + label: response.data[item].name, + value: response.data[item].name, })); setChannels(newChannels); } diff --git a/src/dashboard/src/pages/ChainCode/forms/CommitForm.js 
b/src/dashboard/src/pages/ChainCode/forms/CommitForm.js index b754485e4..426c5afe0 100644 --- a/src/dashboard/src/pages/ChainCode/forms/CommitForm.js +++ b/src/dashboard/src/pages/ChainCode/forms/CommitForm.js @@ -23,9 +23,9 @@ const CommitForm = props => { async function fetchData() { try { const response = await listChannel(); - const newChannels = Object.keys(response.data.data).map(item => ({ - label: response.data.data[item].name, - value: response.data.data[item].name, + const newChannels = Object.keys(response.data).map(item => ({ + label: response.data[item].name, + value: response.data[item].name, })); setChannels(newChannels); } catch (error) { From 8fb225d93451bda90d75666fe529bd5034df7602 Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Sun, 5 Oct 2025 12:02:33 +0800 Subject: [PATCH 30/67] Fix failed to get user lists Signed-off-by: dodo920306 --- src/api-engine/auth/views.py | 2 +- src/api-engine/organization/views.py | 23 +++++++++++++++---- src/api-engine/user/serializers.py | 4 ++-- src/api-engine/user/views.py | 12 ++++------ src/dashboard/src/models/organization.js | 7 ++++++ .../pages/UserManagement/UserManagement.js | 12 ++++------ src/dashboard/src/services/organization.js | 4 ++++ 7 files changed, 41 insertions(+), 23 deletions(-) diff --git a/src/api-engine/auth/views.py b/src/api-engine/auth/views.py index 03a75cbaa..5f8bf1cbf 100644 --- a/src/api-engine/auth/views.py +++ b/src/api-engine/auth/views.py @@ -90,7 +90,7 @@ def post(self, request, *args, **kwargs): token=serializer.validated_data["token"], ) user = UserProfile.objects.get(pk=access_token["user_id"]) - except Union[TokenError, UserProfile.DoesNotExist]: + except (TokenError, UserProfile.DoesNotExist): LOG.exception("invalid token error") return Response( data=err(msg="invalid token"), diff --git a/src/api-engine/organization/views.py b/src/api-engine/organization/views.py index b538aff59..ebb65a97c 100644 --- a/src/api-engine/organization/views.py +++ 
b/src/api-engine/organization/views.py @@ -21,6 +21,25 @@ class OrganizationViewSet(viewsets.ViewSet): """Class represents organization related operations.""" permission_classes = [IsAuthenticated] + @swagger_auto_schema( + operation_summary="Get Organization", + responses=with_common_response( + {status.HTTP_200_OK: make_response_serializer(OrganizationResponse)} + ), + ) + def retrieve(self, request, pk=None): + try: + res = Organization.objects.get(pk=pk) + except Organization.DoesNotExist: + return Response( + status=status.HTTP_404_NOT_FOUND, + data=err("Organization not found") + ) + return Response( + status=status.HTTP_200_OK, + data=ok(OrganizationResponse(res).data) + ) + @swagger_auto_schema( operation_summary="Get Organizations", query_serializer=PageQuerySerializer(), @@ -31,10 +50,6 @@ class OrganizationViewSet(viewsets.ViewSet): def list(self, request): serializer = PageQuerySerializer(data=request.GET) p = serializer.get_paginator(Organization.objects.all()) - response = OrganizationList({ - "total": p.count, - "data": OrganizationResponse(p.page(serializer.data["page"]).object_list, many=True).data - }) return Response( status=status.HTTP_200_OK, data=ok(OrganizationList({ diff --git a/src/api-engine/user/serializers.py b/src/api-engine/user/serializers.py index de67275e7..726d86405 100644 --- a/src/api-engine/user/serializers.py +++ b/src/api-engine/user/serializers.py @@ -5,7 +5,7 @@ from rest_framework import serializers from api.common.serializers import ListResponseSerializer -from organization.serializeres import OrganizationID +from organization.serializeres import OrganizationID, OrganizationResponse from user.models import UserProfile @@ -39,7 +39,7 @@ class Meta: class UserInfo(serializers.ModelSerializer): - organization = OrganizationID() + organization = OrganizationResponse() class Meta: model = UserProfile fields = ( diff --git a/src/api-engine/user/views.py b/src/api-engine/user/views.py index 586ef9bbc..c22b4391b 100644 --- 
a/src/api-engine/user/views.py +++ b/src/api-engine/user/views.py @@ -42,19 +42,15 @@ class UserViewSet(viewsets.ViewSet): def list(self, request: Request) -> Response: serializer = PageQuerySerializer(data=request.GET) p = serializer.get_paginator(UserProfile.objects.filter(organization=request.user.organization)) - - response = UserList( - data = { + return Response( + status = status.HTTP_200_OK, + data = ok(UserList({ "total": p.count, "data": UserInfo( p.page(serializer.data['page']).object_list, many=True ).data, - }) - response.is_valid(raise_exception=True) - return Response( - status = status.HTTP_200_OK, - data = ok(response.data), + }).data), ) @swagger_auto_schema( diff --git a/src/dashboard/src/models/organization.js b/src/dashboard/src/models/organization.js index bb7a7d3fd..711b63035 100644 --- a/src/dashboard/src/models/organization.js +++ b/src/dashboard/src/models/organization.js @@ -1,4 +1,5 @@ import { + getOrganization, listOrganization, createOrganization, updateOrganization, @@ -19,6 +20,12 @@ export default { }, effects: { + *getOrganization({ payload, callback }, { call }) { + const response = yield call(getOrganization, payload); + if (callback) { + callback(response); + } + }, *listOrganization({ payload, callback }, { call, put, select }) { const response = yield call(listOrganization, payload); const pagination = yield select(state => state.organization.pagination); diff --git a/src/dashboard/src/pages/UserManagement/UserManagement.js b/src/dashboard/src/pages/UserManagement/UserManagement.js index 324d2b3a5..6cde73661 100644 --- a/src/dashboard/src/pages/UserManagement/UserManagement.js +++ b/src/dashboard/src/pages/UserManagement/UserManagement.js @@ -477,24 +477,20 @@ class UserManagement extends PureComponent { render() { const { modalVisible, modalMethod, selectedRows } = this.state; const { - user: { users, pagination, currentUser }, + user: { users, pagination }, organization: { organizations }, loadingUsers, creatingUser, 
dispatch, intl, } = this.props; - const data = users.map(user => ({ - ...user, - disabled: user.username === currentUser.username, - })); const columns = [ { title: intl.formatMessage({ id: 'app.user.table.header.name', defaultMessage: 'User Name', }), - dataIndex: 'username', + dataIndex: 'email', }, { title: intl.formatMessage({ @@ -504,7 +500,7 @@ class UserManagement extends PureComponent { dataIndex: 'role', render: text => intl.formatMessage({ - id: `app.user.role.${text}`, + id: `app.user.role.${text.toLowerCase()}`, defaultMessage: 'User', }), }, @@ -610,7 +606,7 @@ class UserManagement extends PureComponent { loading={loadingUsers} rowKey="id" data={{ - list: data, + list: users.data, pagination, }} columns={columns} diff --git a/src/dashboard/src/services/organization.js b/src/dashboard/src/services/organization.js index 47a01ac8e..133d64131 100644 --- a/src/dashboard/src/services/organization.js +++ b/src/dashboard/src/services/organization.js @@ -1,6 +1,10 @@ import { stringify } from 'qs'; import request from '@/utils/request'; +export async function getOrganization(params) { + return request(`/api/v1/organizations?${params.id}`); +} + export async function listOrganization(params) { return request(`/api/v1/organizations?${stringify(params)}`); } From b88aad8727beb2de42e20cddb044b91231f8d11d Mon Sep 17 00:00:00 2001 From: dodo920306 Date: Mon, 6 Oct 2025 15:40:24 +0800 Subject: [PATCH 31/67] Finish Chaincode uploading Signed-off-by: dodo920306 --- bootup/kubernetes/Makefile | 104 ----- .../templates/api-engine-tasks/deploy.tmpl | 40 -- .../templates/api-engine/config.tmpl | 18 - .../templates/api-engine/deploy-dev.tmpl | 40 -- .../templates/api-engine/deploy.tmpl | 27 -- .../templates/api-engine/ingress.tmpl | 15 - .../templates/api-engine/service.tmpl | 14 - bootup/kubernetes/templates/nginx/config.tmpl | 7 - bootup/kubernetes/templates/nginx/deploy.tmpl | 20 - .../kubernetes/templates/nginx/ingress.tmpl | 15 - 
.../kubernetes/templates/nginx/service.tmpl | 14 - .../kubernetes/templates/postgres/config.tmpl | 8 - .../kubernetes/templates/postgres/deploy.tmpl | 26 -- .../templates/postgres/service.tmpl | 14 - bootup/kubernetes/templates/redis/deploy.tmpl | 24 -- .../kubernetes/templates/redis/service.tmpl | 14 - docker-compose.dev.yaml | 66 ++++ src/agent/docker-rest-agent/README.md | 7 - src/agent/docker-rest-agent/gunicorn.conf.py | 3 - .../intergration-test/block.zip | Bin 762 -> 0 bytes .../intergration-test/msp.zip | Bin 158 -> 0 bytes .../intergration-test/orderer_config.zip | Bin 4594 -> 0 bytes .../intergration-test/peer_config.zip | Bin 10306 -> 0 bytes .../intergration-test/test.py | 86 ---- .../intergration-test/tls.zip | Bin 158 -> 0 bytes src/agent/docker-rest-agent/pip.conf | 5 - src/agent/docker-rest-agent/requirements.txt | 4 - src/agent/docker-rest-agent/server.py | 211 ---------- src/agent/k8s-rest-agent/Dockerfile | 24 -- src/agent/k8s-rest-agent/entrypoint.sh | 11 - src/agent/k8s-rest-agent/pip/pip.conf | 5 - src/agent/k8s-rest-agent/requirements.txt | 18 - src/agent/k8s-rest-agent/src/api/__init__.py | 0 src/agent/k8s-rest-agent/src/api/admin.py | 3 - src/agent/k8s-rest-agent/src/api/apps.py | 5 - src/agent/k8s-rest-agent/src/api/auth.py | 0 .../src/api/management/__init__.py | 0 .../src/api/management/commands/__init__.py | 0 .../src/api/management/commands/test_task.py | 14 - .../src/api/migrations/__init__.py | 0 .../k8s-rest-agent/src/api/models/__init__.py | 1 - .../k8s-rest-agent/src/api/models/user.py | 42 -- .../k8s-rest-agent/src/api/routes/__init__.py | 0 .../src/api/routes/hello/__init__.py | 0 .../src/api/routes/hello/views.py | 37 -- .../k8s-rest-agent/src/api/tasks/__init__.py | 1 - .../src/api/tasks/task/__init__.py | 0 .../src/api/tasks/task/example.py | 12 - src/agent/k8s-rest-agent/src/api/tests.py | 3 - .../k8s-rest-agent/src/api/utils/__init__.py | 0 .../src/api/utils/common/__init__.py | 2 - 
.../k8s-rest-agent/src/api/utils/common/db.py | 26 -- .../src/api/utils/common/swagger.py | 62 --- .../src/api/utils/db_functions.py | 14 - .../k8s-rest-agent/src/api/utils/enums.py | 102 ----- .../src/api/utils/exception_handler.py | 34 -- .../k8s-rest-agent/src/api/utils/fast_enum.py | 283 ------------- src/agent/k8s-rest-agent/src/api/utils/jwt.py | 30 -- .../k8s-rest-agent/src/api/utils/mixins.py | 12 - .../src/api/utils/serializers.py | 33 -- src/agent/k8s-rest-agent/src/api/views.py | 3 - src/agent/k8s-rest-agent/src/manage.py | 21 - .../k8s-rest-agent/src/server/__init__.py | 7 - src/agent/k8s-rest-agent/src/server/asgi.py | 16 - src/agent/k8s-rest-agent/src/server/celery.py | 15 - .../k8s-rest-agent/src/server/settings.py | 224 ----------- src/agent/k8s-rest-agent/src/server/urls.py | 70 ---- src/agent/k8s-rest-agent/src/server/wsgi.py | 16 - .../supervisor/conf.d/server.conf | 8 - .../supervisor/supervisord.conf | 28 -- src/agent/k8s-rest-agent/uwsgi/server.ini | 71 ---- src/agent/kubernetes-agent/Dockerfile | 15 - src/agent/kubernetes-agent/requirements.txt | 2 - src/agent/kubernetes-agent/src/__init__.py | 3 - src/agent/kubernetes-agent/src/main.py | 13 - .../kubernetes-agent/src/network/__init__.py | 4 - .../src/network/fabric/__init__.py | 283 ------------- .../src/network/fabric/peer_initial_ca.sh | 22 -- .../src/operations/__init__.py | 6 - .../src/operations/create_node.py | 161 -------- .../src/operations/delete_node.py | 65 --- .../src/operations/fabric_ca_register.py | 82 ---- .../src/operations/fabric_ca_register.sh | 23 -- .../kubernetes-agent/src/utils/__init__.py | 33 -- .../kubernetes-agent/src/utils/client.py | 373 ------------------ .../kubernetes-agent/src/utils/download.py | 22 -- src/agent/kubernetes-agent/src/utils/env.py | 83 ---- src/api-engine/.dockerignore | 2 + src/api-engine/{Dockerfile.in => Dockerfile} | 18 +- src/api-engine/chaincode/serializers.py | 13 + src/api-engine/chaincode/service.py | 2 +- 
src/api-engine/channel/views.py | 7 +- src/api-engine/entrypoint.sh | 11 +- src/api-engine/node/views.py | 12 +- src/api-engine/requirements.txt | 2 +- src/dashboard/Dockerfile | 14 + src/dashboard/cello.conf | 32 ++ src/dashboard/src/models/chaincode.js | 7 + src/dashboard/src/models/channel.js | 2 +- src/dashboard/src/models/login.js | 5 +- .../src/pages/ChainCode/ChainCode.js | 13 +- .../src/pages/ChainCode/forms/UploadForm.js | 152 ++++++- src/dashboard/src/pages/ChainCode/styles.less | 2 +- src/dashboard/src/pages/Channel/Channel.js | 10 +- src/dashboard/src/pages/Node/index.js | 10 +- src/dashboard/src/pages/User/Login.js | 15 +- src/dashboard/src/services/chaincode.js | 7 + 107 files changed, 338 insertions(+), 3223 deletions(-) delete mode 100644 bootup/kubernetes/Makefile delete mode 100644 bootup/kubernetes/templates/api-engine-tasks/deploy.tmpl delete mode 100644 bootup/kubernetes/templates/api-engine/config.tmpl delete mode 100644 bootup/kubernetes/templates/api-engine/deploy-dev.tmpl delete mode 100644 bootup/kubernetes/templates/api-engine/deploy.tmpl delete mode 100644 bootup/kubernetes/templates/api-engine/ingress.tmpl delete mode 100644 bootup/kubernetes/templates/api-engine/service.tmpl delete mode 100644 bootup/kubernetes/templates/nginx/config.tmpl delete mode 100644 bootup/kubernetes/templates/nginx/deploy.tmpl delete mode 100644 bootup/kubernetes/templates/nginx/ingress.tmpl delete mode 100644 bootup/kubernetes/templates/nginx/service.tmpl delete mode 100644 bootup/kubernetes/templates/postgres/config.tmpl delete mode 100644 bootup/kubernetes/templates/postgres/deploy.tmpl delete mode 100644 bootup/kubernetes/templates/postgres/service.tmpl delete mode 100644 bootup/kubernetes/templates/redis/deploy.tmpl delete mode 100644 bootup/kubernetes/templates/redis/service.tmpl create mode 100644 docker-compose.dev.yaml delete mode 100644 src/agent/docker-rest-agent/README.md delete mode 100644 src/agent/docker-rest-agent/gunicorn.conf.py delete mode 
100644 src/agent/docker-rest-agent/intergration-test/block.zip delete mode 100644 src/agent/docker-rest-agent/intergration-test/msp.zip delete mode 100644 src/agent/docker-rest-agent/intergration-test/orderer_config.zip delete mode 100644 src/agent/docker-rest-agent/intergration-test/peer_config.zip delete mode 100644 src/agent/docker-rest-agent/intergration-test/test.py delete mode 100644 src/agent/docker-rest-agent/intergration-test/tls.zip delete mode 100644 src/agent/docker-rest-agent/pip.conf delete mode 100644 src/agent/docker-rest-agent/requirements.txt delete mode 100644 src/agent/docker-rest-agent/server.py delete mode 100644 src/agent/k8s-rest-agent/Dockerfile delete mode 100644 src/agent/k8s-rest-agent/entrypoint.sh delete mode 100644 src/agent/k8s-rest-agent/pip/pip.conf delete mode 100644 src/agent/k8s-rest-agent/requirements.txt delete mode 100644 src/agent/k8s-rest-agent/src/api/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/admin.py delete mode 100644 src/agent/k8s-rest-agent/src/api/apps.py delete mode 100644 src/agent/k8s-rest-agent/src/api/auth.py delete mode 100644 src/agent/k8s-rest-agent/src/api/management/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/management/commands/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/management/commands/test_task.py delete mode 100644 src/agent/k8s-rest-agent/src/api/migrations/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/models/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/models/user.py delete mode 100644 src/agent/k8s-rest-agent/src/api/routes/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/routes/hello/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/routes/hello/views.py delete mode 100644 src/agent/k8s-rest-agent/src/api/tasks/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/tasks/task/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/tasks/task/example.py 
delete mode 100644 src/agent/k8s-rest-agent/src/api/tests.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/common/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/common/db.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/common/swagger.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/db_functions.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/enums.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/exception_handler.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/fast_enum.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/jwt.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/mixins.py delete mode 100644 src/agent/k8s-rest-agent/src/api/utils/serializers.py delete mode 100644 src/agent/k8s-rest-agent/src/api/views.py delete mode 100755 src/agent/k8s-rest-agent/src/manage.py delete mode 100644 src/agent/k8s-rest-agent/src/server/__init__.py delete mode 100644 src/agent/k8s-rest-agent/src/server/asgi.py delete mode 100644 src/agent/k8s-rest-agent/src/server/celery.py delete mode 100644 src/agent/k8s-rest-agent/src/server/settings.py delete mode 100644 src/agent/k8s-rest-agent/src/server/urls.py delete mode 100644 src/agent/k8s-rest-agent/src/server/wsgi.py delete mode 100644 src/agent/k8s-rest-agent/supervisor/conf.d/server.conf delete mode 100644 src/agent/k8s-rest-agent/supervisor/supervisord.conf delete mode 100644 src/agent/k8s-rest-agent/uwsgi/server.ini delete mode 100644 src/agent/kubernetes-agent/Dockerfile delete mode 100644 src/agent/kubernetes-agent/requirements.txt delete mode 100644 src/agent/kubernetes-agent/src/__init__.py delete mode 100644 src/agent/kubernetes-agent/src/main.py delete mode 100644 src/agent/kubernetes-agent/src/network/__init__.py delete mode 100644 src/agent/kubernetes-agent/src/network/fabric/__init__.py delete mode 100644 
src/agent/kubernetes-agent/src/network/fabric/peer_initial_ca.sh delete mode 100644 src/agent/kubernetes-agent/src/operations/__init__.py delete mode 100644 src/agent/kubernetes-agent/src/operations/create_node.py delete mode 100644 src/agent/kubernetes-agent/src/operations/delete_node.py delete mode 100644 src/agent/kubernetes-agent/src/operations/fabric_ca_register.py delete mode 100755 src/agent/kubernetes-agent/src/operations/fabric_ca_register.sh delete mode 100644 src/agent/kubernetes-agent/src/utils/__init__.py delete mode 100644 src/agent/kubernetes-agent/src/utils/client.py delete mode 100644 src/agent/kubernetes-agent/src/utils/download.py delete mode 100644 src/agent/kubernetes-agent/src/utils/env.py create mode 100644 src/api-engine/.dockerignore rename src/api-engine/{Dockerfile.in => Dockerfile} (69%) create mode 100644 src/dashboard/Dockerfile create mode 100644 src/dashboard/cello.conf diff --git a/bootup/kubernetes/Makefile b/bootup/kubernetes/Makefile deleted file mode 100644 index b314c9673..000000000 --- a/bootup/kubernetes/Makefile +++ /dev/null @@ -1,104 +0,0 @@ -init-nginx-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/nginx - @envsubst < templates/nginx/config.tmpl > nginx/config.yml - @envsubst < templates/nginx/deploy.tmpl > nginx/deploy.yml - @envsubst < templates/nginx/service.tmpl > nginx/service.yml - @envsubst < templates/nginx/ingress.tmpl > nginx/ingress.yml - -init-api-engine-yaml: - rm -rf ${ROOT_PATH}/bootup/kubernetes/api-engine - mkdir -p ${ROOT_PATH}/bootup/kubernetes/api-engine - @envsubst < templates/api-engine/config.tmpl > api-engine/config.yml - @envsubst < templates/api-engine/${DEPLOY_TEMPLATE_NAME} > api-engine/deploy.yml - @envsubst < templates/api-engine/service.tmpl > api-engine/service.yml - if [ "$(MODE)" = "dev" ]; then \ - envsubst < templates/api-engine/ingress.tmpl > api-engine/ingress.yml; \ - fi - -init-api-engine-tasks-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/api-engine-tasks - @envsubst < 
templates/api-engine-tasks/deploy.tmpl > api-engine-tasks/deploy.yml - -init-rabbitmq-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/rabbitmq - @envsubst < templates/rabbitmq/config.tmpl > rabbitmq/config.yml - @envsubst < templates/rabbitmq/deploy.tmpl > rabbitmq/deploy.yml - @envsubst < templates/rabbitmq/service.tmpl > rabbitmq/service.yml - -init-mongo-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/mongo - @envsubst < templates/mongo/deploy.tmpl > mongo/deploy.yml - @envsubst < templates/mongo/service.tmpl > mongo/service.yml - -init-redis-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/redis - @envsubst < templates/redis/deploy.tmpl > redis/deploy.yml - @envsubst < templates/redis/service.tmpl > redis/service.yml - -init-operator-dashboard-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/operator-dashboard - @envsubst < templates/operator-dashboard/config.tmpl > operator-dashboard/config.yml - @envsubst < templates/operator-dashboard/${DEPLOY_TEMPLATE_NAME} > operator-dashboard/deploy.yml - @envsubst < templates/operator-dashboard/service.tmpl > operator-dashboard/service.yml - -init-user-dashboard-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/user-dashboard - @envsubst < templates/user-dashboard/config.tmpl > user-dashboard/config.yml - @envsubst < templates/user-dashboard/${DEPLOY_TEMPLATE_NAME} > user-dashboard/deploy.yml - @envsubst < templates/user-dashboard/service.tmpl > operator-dashboard/service.yml - @envsubst < templates/user-dashboard/ingress.tmpl > user-dashboard/ingress.yml - -init-parse-server-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/parse-server - @envsubst < templates/parse-server/config.tmpl > parse-server/config.yml - @envsubst < templates/parse-server/${DEPLOY_TEMPLATE_NAME} > parse-server/deploy.yml - @envsubst < templates/parse-server/service.tmpl > parse-server/service.yml - -init-mysql-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/mysql - @envsubst < templates/mysql/config.tmpl > mysql/config.yml - @envsubst < 
templates/mysql/deploy.tmpl > mysql/deploy.yml - @envsubst < templates/mysql/service.tmpl > mysql/service.yml - -init-postgres-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/postgres - @envsubst < templates/postgres/config.tmpl > postgres/config.yml - @envsubst < templates/postgres/deploy.tmpl > postgres/deploy.yml - @envsubst < templates/postgres/service.tmpl > postgres/service.yml - -init-watchdog-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/watchdog - @envsubst < templates/watchdog/config.tmpl > watchdog/config.yml - @envsubst < templates/watchdog/${DEPLOY_TEMPLATE_NAME} > watchdog/deploy.yml - -init-keycloak-yaml: - mkdir -p ${ROOT_PATH}/bootup/kubernetes/keycloak - @envsubst < templates/keycloak/config.tmpl > keycloak/config.yml - @envsubst < templates/keycloak/deploy.tmpl > keycloak/deploy.yml - @envsubst < templates/keycloak/service.tmpl > keycloak/service.yml - @envsubst < templates/keycloak/ingress.tmpl > keycloak/ingress.yml - -init-yaml: - @$(MAKE) init-nginx-yaml - @$(MAKE) init-api-engine-yaml - @$(MAKE) init-api-engine-tasks-yaml - @$(MAKE) init-redis-yaml - @$(MAKE) init-postgres-yaml - -start: - @kubectl apply --force -f postgres/ -n ${K8S_DEPLOY_NAMESPACE} - @kubectl apply --force -f redis/ -n ${K8S_DEPLOY_NAMESPACE} - @kubectl apply --force -f api-engine-tasks/ -n ${K8S_DEPLOY_NAMESPACE} - @kubectl apply --force -f api-engine/ -n ${K8S_DEPLOY_NAMESPACE} - if [ "$(MODE)" != "dev" ]; then \ - kubectl apply --force -f nginx/ -n ${K8S_DEPLOY_NAMESPACE}; \ - fi - -stop: - @kubectl delete -f postgres/ -n ${K8S_DEPLOY_NAMESPACE} - @kubectl delete -f redis/ -n ${K8S_DEPLOY_NAMESPACE} - @kubectl delete -f api-engine-tasks/ -n ${K8S_DEPLOY_NAMESPACE} - @kubectl delete -f api-engine/ -n ${K8S_DEPLOY_NAMESPACE} - if [ "$(MODE)" != "dev" ]; then \ - kubectl delete -f nginx/ -n ${K8S_DEPLOY_NAMESPACE}; \ - fi diff --git a/bootup/kubernetes/templates/api-engine-tasks/deploy.tmpl b/bootup/kubernetes/templates/api-engine-tasks/deploy.tmpl deleted file mode 
100644 index 10775c9a7..000000000 --- a/bootup/kubernetes/templates/api-engine-tasks/deploy.tmpl +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: ${DEPLOY_API_ENGINE_TASKS_NAME} -spec: - replicas: 1 - template: - metadata: - labels: - app: ${DEPLOY_API_ENGINE_TASKS_NAME} - spec: - containers: - - name: api-engine-tasks - image: hyperledger/cello-api-engine - imagePullPolicy: IfNotPresent - envFrom: - - configMapRef: - name: ${CONFIG_API_ENGINE_NAME} - env: - - name: DEBUG - value: "True" - - name: RUN_MODE - value: "task" - volumeMounts: - - name: api-engine - mountPath: /var/www/server - - name: media - mountPath: /var/www/media - - name: docker-sock - mountPath: /var/run/docker.sock - volumes: - - name: api-engine - hostPath: - path: ${ROOT_PATH}/src/api-engine - - name: media - hostPath: - path: /opt/cello/api-engine/media - - name: docker-sock - hostPath: - path: /var/run/docker.sock diff --git a/bootup/kubernetes/templates/api-engine/config.tmpl b/bootup/kubernetes/templates/api-engine/config.tmpl deleted file mode 100644 index eae112f74..000000000 --- a/bootup/kubernetes/templates/api-engine/config.tmpl +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: ${CONFIG_API_ENGINE_NAME} -data: - WEBROOT: ${API_ENGINE_WEBROOT} - DB_USER: ${POSTGRES_USER} - DB_PASSWORD: ${POSTGRES_PASSWORD} - DB_NAME: api-engine - DB_HOST: ${SERVICE_POSTGRES_NAME} - DB_PORT: "5432" - ADMIN_TOKEN: $API_ENGINE_ADMIN_TOKEN - CELERY_BROKER_URL: "redis://$SERVICE_REDIS_NAME" - ADMIN_USERNAME: $API_ENGINE_ADMIN_USERNAME - ADMIN_PASSWORD: $API_ENGINE_ADMIN_PASSWORD - ADMIN_EMAIL: $API_ENGINE_ADMIN_EMAIL - DOCKER_HOST: "$API_DOCKER_HOST" - API_VERSION: "$API_VERSION" diff --git a/bootup/kubernetes/templates/api-engine/deploy-dev.tmpl b/bootup/kubernetes/templates/api-engine/deploy-dev.tmpl deleted file mode 100644 index 0787b406b..000000000 --- a/bootup/kubernetes/templates/api-engine/deploy-dev.tmpl +++ 
/dev/null @@ -1,40 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: ${DEPLOY_API_ENGINE_NAME} -spec: - replicas: 1 - template: - metadata: - labels: - app: ${DEPLOY_API_ENGINE_NAME} - spec: - containers: - - name: api-engine - image: hyperledger/cello-api-engine - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8080 - envFrom: - - configMapRef: - name: ${CONFIG_API_ENGINE_NAME} - env: - - name: DEBUG - value: "True" - volumeMounts: - - name: api-engine - mountPath: /var/www/server - - name: media - mountPath: /var/www/media - - name: docker-sock - mountPath: /var/run/docker.sock - volumes: - - name: api-engine - hostPath: - path: ${ROOT_PATH}/src/api-engine - - name: media - hostPath: - path: /opt/cello/api-engine/media - - name: docker-sock - hostPath: - path: /var/run/docker.sock diff --git a/bootup/kubernetes/templates/api-engine/deploy.tmpl b/bootup/kubernetes/templates/api-engine/deploy.tmpl deleted file mode 100644 index 18bc7e1b6..000000000 --- a/bootup/kubernetes/templates/api-engine/deploy.tmpl +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: ${DEPLOY_API_ENGINE_NAME} -spec: - replicas: 1 - template: - metadata: - labels: - app: ${DEPLOY_API_ENGINE_NAME} - spec: - containers: - - name: api-engine - image: hyperledger/cello-api-engine - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8080 - envFrom: - - configMapRef: - name: ${CONFIG_API_ENGINE_NAME} - volumeMounts: - - name: media - mountPath: /var/www/media - volumes: - - name: media - hostPath: - path: /opt/cello/api-engine/media diff --git a/bootup/kubernetes/templates/api-engine/ingress.tmpl b/bootup/kubernetes/templates/api-engine/ingress.tmpl deleted file mode 100644 index ab790f13f..000000000 --- a/bootup/kubernetes/templates/api-engine/ingress.tmpl +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: ${INGRESS_API_ENGINE_NAME} - annotations: - 
nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/force-ssl-redirect: "false" -spec: - rules: - - http: - paths: - - path: ${API_ENGINE_WEBROOT} - backend: - serviceName: ${SERVICE_API_ENGINE_NAME} - servicePort: 8080 diff --git a/bootup/kubernetes/templates/api-engine/service.tmpl b/bootup/kubernetes/templates/api-engine/service.tmpl deleted file mode 100644 index d1c066c97..000000000 --- a/bootup/kubernetes/templates/api-engine/service.tmpl +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ${SERVICE_API_ENGINE_NAME} - labels: - app: ${SERVICE_API_ENGINE_NAME} -spec: - ports: - - port: 8080 - targetPort: 8080 - protocol: TCP - name: uwsgi - selector: - app: ${DEPLOY_API_ENGINE_NAME} diff --git a/bootup/kubernetes/templates/nginx/config.tmpl b/bootup/kubernetes/templates/nginx/config.tmpl deleted file mode 100644 index 97a78c224..000000000 --- a/bootup/kubernetes/templates/nginx/config.tmpl +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: ${CONFIG_NGINX_NAME} -data: - URL_PREFIX: ${API_ENGINE_WEBROOT} - UWSGI_SERVER_HOST: ${SERVICE_API_ENGINE_NAME} diff --git a/bootup/kubernetes/templates/nginx/deploy.tmpl b/bootup/kubernetes/templates/nginx/deploy.tmpl deleted file mode 100644 index 969524770..000000000 --- a/bootup/kubernetes/templates/nginx/deploy.tmpl +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: ${DEPLOY_NGINX_NAME} -spec: - replicas: 1 - template: - metadata: - labels: - app: ${DEPLOY_NGINX_NAME} - spec: - containers: - - name: nginx - image: hyperledger/cello-nginx - imagePullPolicy: IfNotPresent - ports: - - containerPort: 80 - envFrom: - - configMapRef: - name: ${CONFIG_NGINX_NAME} diff --git a/bootup/kubernetes/templates/nginx/ingress.tmpl b/bootup/kubernetes/templates/nginx/ingress.tmpl deleted file mode 100644 index f575234a7..000000000 --- a/bootup/kubernetes/templates/nginx/ingress.tmpl +++ /dev/null @@ 
-1,15 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: ${INGRESS_NGINX_NAME} - annotations: - nginx.ingress.kubernetes.io/ssl-redirect: "false" - nginx.ingress.kubernetes.io/force-ssl-redirect: "false" -spec: - rules: - - http: - paths: - - path: ${API_ENGINE_WEBROOT} - backend: - serviceName: ${SERVICE_NGINX_NAME} - servicePort: 80 diff --git a/bootup/kubernetes/templates/nginx/service.tmpl b/bootup/kubernetes/templates/nginx/service.tmpl deleted file mode 100644 index 75a132c40..000000000 --- a/bootup/kubernetes/templates/nginx/service.tmpl +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ${SERVICE_NGINX_NAME} - labels: - app: ${SERVICE_NGINX_NAME} -spec: - ports: - - port: 80 - targetPort: 80 - protocol: TCP - name: http - selector: - app: ${DEPLOY_NGINX_NAME} diff --git a/bootup/kubernetes/templates/postgres/config.tmpl b/bootup/kubernetes/templates/postgres/config.tmpl deleted file mode 100644 index 4befa0a53..000000000 --- a/bootup/kubernetes/templates/postgres/config.tmpl +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: ${CONFIG_POSTGRES_NAME} -data: - POSTGRES_DB: api-engine - POSTGRES_USER: $POSTGRES_USER - POSTGRES_PASSWORD: $POSTGRES_PASSWORD diff --git a/bootup/kubernetes/templates/postgres/deploy.tmpl b/bootup/kubernetes/templates/postgres/deploy.tmpl deleted file mode 100644 index 2bb3bf9b4..000000000 --- a/bootup/kubernetes/templates/postgres/deploy.tmpl +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: ${DEPLOY_POSTGRES_NAME} -spec: - replicas: 1 - template: - metadata: - labels: - app: ${DEPLOY_POSTGRES_NAME} - spec: - containers: - - name: postgres - image: postgres:11.1 - ports: - - containerPort: 5432 - envFrom: - - configMapRef: - name: ${CONFIG_POSTGRES_NAME} - volumeMounts: - - name: db-path - mountPath: /var/lib/postgresql/data - volumes: - - name: db-path - hostPath: - path: /opt/cello/postgres diff 
--git a/bootup/kubernetes/templates/postgres/service.tmpl b/bootup/kubernetes/templates/postgres/service.tmpl deleted file mode 100644 index dbbeec8f4..000000000 --- a/bootup/kubernetes/templates/postgres/service.tmpl +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ${SERVICE_POSTGRES_NAME} - labels: - app: ${SERVICE_POSTGRES_NAME} -spec: - ports: - - port: 5432 - targetPort: 5432 - protocol: TCP - name: postgres - selector: - app: ${DEPLOY_POSTGRES_NAME} diff --git a/bootup/kubernetes/templates/redis/deploy.tmpl b/bootup/kubernetes/templates/redis/deploy.tmpl deleted file mode 100644 index 4b8f98549..000000000 --- a/bootup/kubernetes/templates/redis/deploy.tmpl +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: ${DEPLOY_REDIS_NAME} -spec: - replicas: 1 - template: - metadata: - labels: - app: ${DEPLOY_REDIS_NAME} - spec: - containers: - - name: redis - image: redis:4.0.13 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 6379 - volumeMounts: - - name: redis-data - mountPath: /data - volumes: - - name: redis-data - hostPath: - path: /opt/cello/redis diff --git a/bootup/kubernetes/templates/redis/service.tmpl b/bootup/kubernetes/templates/redis/service.tmpl deleted file mode 100644 index ce9c08b60..000000000 --- a/bootup/kubernetes/templates/redis/service.tmpl +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ${SERVICE_REDIS_NAME} - labels: - app: ${SERVICE_REDIS_NAME} -spec: - ports: - - port: 6379 - targetPort: 6379 - protocol: TCP - name: redis - selector: - app: ${DEPLOY_REDIS_NAME} \ No newline at end of file diff --git a/docker-compose.dev.yaml b/docker-compose.dev.yaml new file mode 100644 index 000000000..5ff505588 --- /dev/null +++ b/docker-compose.dev.yaml @@ -0,0 +1,66 @@ +services: + cello-dashboard: + build: + context: ./src/dashboard + image: cello/dashboard:latest + container_name: cello-dashboard + restart: unless-stopped + ports: 
+ - "${DASHBOARD_SERVICE_PORT:-8081}:8081" + networks: + - cello-net + depends_on: + - cello-api-engine + + cello-postgres: + image: postgres:12.0 + container_name: cello-postgres + restart: unless-stopped + environment: + - POSTGRES_DB=api_engine + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=123456 + ports: + - "5432:5432" + volumes: + - cello-postgres:/var/lib/postgresql/data + networks: + - cello-net + + cello-api-engine: + build: + context: ./src/api-engine + image: cello/api-engine:latest + container_name: cello-api-engine + restart: unless-stopped + stdin_open: true + dns_search: . + environment: + - GODEBUG=netdns=go + - DB_NAME=api_engine + - DB_USER=postgres + - DB_PASSWORD=123456 + - DB_HOST=cello-postgres + - DB_PORT=5432 + - DEBUG=True + - CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock + - FABRIC_LOGGING_SPEC=INFO + ports: + - "8080:8080" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + - cello-api-engine:/cello + - cello-api-engine-chaincode:/media + networks: + - cello-net + depends_on: + - cello-postgres + +networks: + cello-net: + name: cello-net + +volumes: + cello-postgres: + cello-api-engine: + cello-api-engine-chaincode: diff --git a/src/agent/docker-rest-agent/README.md b/src/agent/docker-rest-agent/README.md deleted file mode 100644 index 61e5854c7..000000000 --- a/src/agent/docker-rest-agent/README.md +++ /dev/null @@ -1,7 +0,0 @@ -## Pre-requisite - -If you want to use agent, first use the make docker-rest-agent command to generate the image; then run it; - - “docker run -p 5001:5001 -e DOCKER_URL="http://x.x.x.x:2375" -d hyperldger/cello-agent-docker:latest” - -When you run it, you must fill in the IP address of your docker server; diff --git a/src/agent/docker-rest-agent/gunicorn.conf.py b/src/agent/docker-rest-agent/gunicorn.conf.py deleted file mode 100644 index 466f9e7a6..000000000 --- a/src/agent/docker-rest-agent/gunicorn.conf.py +++ /dev/null @@ -1,3 +0,0 @@ -workers = 1 -worker_class = "gevent" -bind = "0.0.0.0:5001" 
diff --git a/src/agent/docker-rest-agent/intergration-test/block.zip b/src/agent/docker-rest-agent/intergration-test/block.zip deleted file mode 100644 index 461b2e8f1134b2cff82435462f8f5122f28593ca..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 762 zcmWIWW@Zs#U|`^2=wcKNoMSs9Ab^R1L5_uifr~+gp(M4qL@y~PKRG)zgp+~!%R0Mw z5H79YW?*D_!OXw_Ce}_mpDpYta(wodyE*Zf%PNzkzdz{L&~?c&zF8vNeL*d7h4|EE zSDX)8-JEOIJp1m3&5YU{Kc#IdUSBL0bP0K}RpVaO_JEAg4IS)`UlQ)NoQ>kw6}i)x z`u%MBz2E!3%X+MOp8qbsxG4VnK3kpM+&4DSx93Iuul@hHyLReZKmYr;?%X=AZ}>x| zd9TEpLyKerl~$?b*Zlap+5F~v!-tZgr*iX_rF~hm&1YuXlM_ali>C-|-jtKIYuU@n zB!Mh}W9y89PA=E3a#L<|TQSdndE~sb(q4hy1znxr{kin#?~8rB{@$I6x&WQ7hrTcG zJ(lk+{iAYlU5m9)p1We%ZPS^jWA1dVn!aGYM=fvazmA6-d?)Vjo|?A#=Ijsg`z|y^ zKHe5^-f55T+0RnNQgVDr{4T~yckJ1hdxd7*J~QjHbM39dI~le=_Hyz~|MaZCqX#lx0r8R5+<}`UNx`>QzLqTh@( zysy8>$lG=5ZEe=2~AX%j<&s+1O9byKS(i?e;$HPmTY1 z{(j$G#PmP3Vcof3%mLnvOmfV)Ql$hiwL-v>Mi7mhMp+?g6fJ!Qc(byBR5Ai#0Fa&n IOyLX+0FnVp>;M1& diff --git a/src/agent/docker-rest-agent/intergration-test/msp.zip b/src/agent/docker-rest-agent/intergration-test/msp.zip deleted file mode 100644 index 5e001613b6d25c2cb0d27f54fdff93065b9e11b8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 158 zcmWIWW@h1H0D;yrkw7p5O0Y1?+WpZV5E_q>XY*kbV z00XHjK3}OTK3{cs3jhHG=mP)%1n2_*0L2?^ZyUGyyMF~?56A-8lI=F_pbZ0LIcbgB zv0%CLfMFP@J4(9wbi5sTcGO?L=Y`~vCn<5dy35+tIg$^b_gDHzOmw|zisj0RtBV`> zTT>6jxGcpTely}u8PzBAq&8xy;cUfHSrnxbrOpyz z)VxBKC z)A0p$68B~@**7T*Fr9I0S9j_^ZP9FVj&~&Rj>Jz&)v_#})CZ&2h}$=b|hl0C{1+E-RH;+_dgsZ(%wT-<_H*Qu=fxSTi=D`3}cE^bYj*=chY> z-vcn+S`Rz-z9S|_7M6t|1sG_p!7l>odL03io_%w*K+a^PHh>v{al`}EK638zkvMml zfjXBhSZ$LEpF9|XLyW*Ni%RDT5tbI9uh(tG`u|+mmC$hKz9a4u(}LHK;P>SMFqQ^X zp}(I|b=aipRpl4$VxgK@@rw$&B|njFRqc9?w=cvRT$x+~mIX6YEz!A&IUSAlYl8f& zOl!I$5;&MQx~>7Sf-RTN;dP6)ERV_C+eTJK5@#cL2^5qaQiWVJ5WW;SOVV5ncbzz5 zqQC>6#t4#Rzsk=zW$5{t2xf~s2UL`?;8Y8<66@BspyPBTD2(~CNC!Q461%W1BK}8D 
zy_>8)29ev25WuiCSEYUy-vVe|<9QI0S?RXSQ6#D$DnapqF-wJiIR=_>sjcF#J?8|U zHY97rPw zZ2>J%n&@PgCx~1vib}Ck^l}Ume+-7bZl|gpjZxyUb&X4MlAC%N5`)K|QtVTC^ltOX z)0Tse?KZyjEAj)j)|abF&&%(zNM4LA@{8DgP43i(KJwY$1*97WsHnkF+Lg{Zq2!;% zeA9_R&#iR6dmhfr0jEbFMK-{Nwa8Wybbz9>sa1xxLOvj%x+d&1S)mc@3jSUAG)IU- zr121Pr&?ONP%_n+*~7&?dZK9=fK4}}Kj0kS|9#OzqXS>Tg$Q1j;B>(Ht6-gQ7r0%} zZgj2~{AHh(q>dnvfo(UAYl!PMPw^{rbQdi>K6-n91~=j$!CqTHO81LD;&JXFi1yt> zi3C7y(!pW|4+Oj31Vt~CEe3O7Ei7$-WEF#@11D3?qdAnQjU8kZhF%lZA)+H@22A=< zQMNH~z_@O*=<8Aeg9f&BE-7gL;ludi?rQR{$%h~QH67poIAjFur@=<9i3bNhn&Ni9 zy2NA_3=%1!@OgLwLatkTfj1mFwe*-=s)cMzOWurFbLbG7^X@-_4fGKK|J;Dr)6M2B z`tM5HNcB^7`G5)$>Nqgy@&@Ybf+7RRy-|FxbJyO&vu0Dn_ci?7fI5f@vK2+<>7D*j zC~L))^`dVSNCZm<0xU7)4q+HIY{|(9V$>+OJ~$%a z1;p;Wjk4K@rjb~ix=-uXrOy!yY9-hWf^O8ZAg7KC2)-1swBp!s_p^@vlOt0zP;)TH*(`E+n{i#4DSV?kb9l?h)+JDacM|^Xz!F@sV_DXLjU8Gx*w;b?@qr9O^b`k1PJylaZ!>?6hr~sa~9*taolQL zRzj!a;OneDn5|2B8U<-rgO>J1$PqAh<32--p!mIL0q7*;lPpT4-IdFFVV@zLj6iWQ z7S8?gaQ1NcmI`XhgL|pu-yl+SyQ$)diG<)cdl+GKa~C=Aj-nluz=`QkliArBo|=q7 z*P8s9bL*^a(012g{iW)I=#X-Z1;u-abIjR8Uv@p5wi&(+8;FUTE|g2(Uf!}7@P6(` z3AGe@{+Y+PR8OjOt^gz>(Qi|94Y93*hqdLVGdz&gK%iwWT{t%3B7{X<9c)E{b0{jA z)S-^-AIIk?eB@)Mb2fAX2w|~Mmr#$IW6}&=!M&uV2?V5OL{O5?p0<%VfA?MQ%Ihd; z#6-~mxn4J2;lag=?A*H3uO6gqr6G)Sa2<&^k)wYz1jqz9IfAhcQq37Iq>{ov-Aa-K z_j+8Yen)3#eI<>D9;N^R%c&+w$El+F#GvBY{mvjJ1w%mxY>sM1fiu z5;SxilsItEm(kg3=>WRI-?vuiYkEoScaY06%T#SC)c9gH>$-MSO|}hHxUS}+uAVDf zx60Z##;r#+2AWc6yHO+31PlWJ?65sRcBP+3;_Th};NZ1J7PI%0`>WfJBNCOkRym|9 z>BPctoh)QsmlSZc?+$K{EV%q5Ag?mEKqMK*BsV2NvODz&E*DGK(hLC?TY6oh!Dog~ zvKg*6wQ3maVV5QjUF!7utp74(Fw9_^&37DCejF+Z1_I7NLg^)gHSGiy zlIl5D2em@S!Ro+>MaKJp?pnf$=eSFdHFSoWd*sj|7ge929H1hu+XqfbONf~fKrF^G z5D?}0;@^PKNu#Ki?_~D&I7!&O$V?De1TUq`rTPnv9Roz>S#FY!(is(7j6W7ve4JGs zW7IQvZa^ReGq(=t6>g{?ZS320!}El^Q%oj5PV&y_QtDt*!#`Yy;ZYmxUR3%-*J5(} z@gtam_kj#DxiDZ7efwr7O<2N1&JQ%fLf+AY_j@3{KCG;KR|@0<{H!6UL%Ms+T~nXJ zHtIxLsGw?;7$!pTEblL~m2Rvb+5FYbT&%X(T3xv1d;IHPkA1ADwxL|>YPlZ>t}+bB zglthVex`+hm1fr;wN*@VtWO~q=Q;|QOj4(gS%c+rPO~8)V?jHm=KEw>hJGWlNzOIa 
z?D7c%BsVXhWn|T8s)(;mqy~9NmqzCoMChWs+c2iAYJhL&sP!3V~E*kG@Pnu zkJwn^X_aB4jVCLz@{Oy=5gjKi@Pjk5ddZ`kbzuxu$1|netbg!Paj$~ri$%52vHf*5 zCzPaAaxTH9+xY-EHU!%1VzC(-Ol#y3!wZK2?z3(l-NenLNz^4^9%gZ8(UcJX+<{2< zE)NPgwS7n9mNyxpI?Jw5B#&T_MNX#!9~q%4kt(&Q)J<`3`h~TSmyd%ENXkx zw!z<^6rnDo5l^RsGx%q4itb`nELW~gagUJtW5sUh%-78usMN3>k~P|xZlVF%O34Eq zdPabt4m#?14etLyEdnpNBLQg5p%NbX)>)~SOYixjgwX8G2oX#G!aXksdjC@#7jE8| zFQsi;7(}@Pb^U3uHMDxzZRxm~yflj0X^JH_gzFZP^o+2K zCy{*y2Y8Ordl#xE7hT(_K};)D#d$Qtq`-L&fwMoDeCy+op?2MCbz?5oyj{Nd z>flkU>KP+_8tPc!qF^=6y-=EER*P4l>{s5^+m!-KO8#n zX4-JGJ$i5e6(iP(?qQ32>Oh%3>*8(`P%QHVI9h0TuPl9d_IfMssajqEV-?^=Q;T3Pv3s~O_VzJ z+L$|ldYZc&v2*?iK(XHnUw7wzy?n*b^ZU<}Ajk?IWcR&%OcId@e3Zu#!l5Cyl|FP#|f4YIw*Wt`%A#)L7RA0+f67Frk(6M-y@XWL-M8}Dc=$N=N)Ed?IIbUh~I zS}8YdijrDC)GY#Vx^1%U$DX@hm&aU%@J*bIMU%-ipW$F-SSfdMp?7J>s{ZlMSF`yW zC^n&F6fd;+wc8Hi)HOza4=Z;_dJ}i==m?Q`^6#FQ7%Q#Gk8uf~MXA@fHQ<0kJDzf(4{FAH@oQfVn)JdHD1)_wcFr&VI%q!4%zMo=P}){vA7+({L$I@ zyAHrTzc=N#`k>LAdD8P79IE5*4k7YwMI%S_g-hA`huU?E6NwHP?Qr=swjC+K>~@I) zlHZ5)73oo*TX1+|a!n z;;wpD)a-V~)2q*EzSAxfj|_G|*b^SYuR{^Q-3YjG13g+&y{EeouGx*rZnvE6 ztBk}P+S3?oZ;1PO8u{*neen+5rfSx*qTg!)*Sp5%JdQxIH5ojyn_Q*yD%_e!4-3O{ zUjI5}u2%A?(A2B)C+V0h{Rj5lhaz*hI04&vtG6}3CO1pSOQ`x$NTnL<(#$mmgNUA9m$_;-)GmOzqIJ{rPwliM&! zDe+aQd#~=G6u5#lw)CyFwS1%}R>0TSQCWX?95Cp0GUqZUT#nbWXt)soYJ;vyFmHO) ze*ms1o8`ksF07yAJ}cJ<@_how!%#syjIXh9NT0oh94k0&a;IXv!fDpEcGZ2jP53u! 
zNBlGZ5~254b$E@YYJ|pFI5XGJP@}YSC?2RQkqdx;6KS{k>XMa{F?-QVf+1pphzy@) z8F~_xtY1Q!fqiDB?e9U8!|5xD@)#cc7f?$90v-bt000080EKoGQ9f$f&O{La0Ju8< z01N;a00000009610JMPs0001Qa%5$4WpXZgVQp+xR0RM7sVhETb$AN^0R-p+000E& c0{{R}O9ci1000010096}0001r5dZ)H09bat7XSbN diff --git a/src/agent/docker-rest-agent/intergration-test/peer_config.zip b/src/agent/docker-rest-agent/intergration-test/peer_config.zip deleted file mode 100644 index 5fea13e828b4277d8a5f0979757c437396a3a4bb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10306 zcmZ|V<98hl@F(ya+iYyx&W)4CO&Ytg?Z%C5+qQ9o#+03ZVVa&R_d@iMlv{i%)sfT~~=G5nvn zc_INIpiiIxfd4&ue%rWgN+<4md?8lzxfEWF9uFp7oNjEvM;4oHhf`=4Uv{jn#?J!X zuxJQQ#2|{jY2SBv^B%;Ll8LL16hf9x#5Mk_UPd2AL3_>fTAj`NwY4ml=fxVDHC25J z(wmGa5qz6YK{ch`wO4b-u1=wsjjOIowixa%ixR6>Yl95?OyU~w{_q8f(xdSvukKfm zKNt7AKEZ<7GWPE!p681`uT-3XJij}X>E;yuopfYU)1MhNosYE)F;n2`u{4^jHvQ!t zbq!dq)|spUQ!h318^`+6$twp1%J|)sN^Mg!-OYiM=~ed=#9y@cZaWp8ix+fAKwFOrS9?_;Qk2B0mI*{1;807JhRD9C$EK&;U zm$zN-h;Zf4%TPM%0(8&DA7NFjX&{fM2m+?>3a&gFE0tmQ28P*~d&YyIgRNRc3%(Tl zHvHYt3oSF9mj!71gk%VMeao_4a{mpB#GmXz zq;XB6cee*+!_%K=;;zmxDt?<6xe*G(YnQi{LVtswCdj@EU!@4G;wdxgdlzOvG}9Q7 zdRMED$fq+5OQD4B1V=*>T4i+wCQxFmE`UhIG8*!9*=^G{*koV_Lwu@c=9Ywr~Jn(8-maI)g z2E}Z_uj&?(!#b@w5b$^IjR=Au7jY4w1Nq`Y|S5WE}Aq#!zv&u=P)+}Q4yz3gg zW{hint*{b^8NO6>`v}7)g_)VFM4;(R#u&%NZhXjzQ;= zX6e0ZEOR2-Gm2&q7d@OR^h>u4zpok&uP15*IX;UeF8}YwwSnO&f#BceSCxK>v~g%6 zEQnIi!0l1#15S~j#Argy3-QRCA&3A~et~ZQw&#kS1@v-QrY~3?XGw~$+XJt5} zN9YUK{00o!v6v4!O6ftK>6B@>ZZ@}+C>|!;5a-`AtlaVUJPo+%jRR9EkLp8+A#2a% z<_Ejj6#X3n_@Qwq=BYOGIC}s%xUMI6;GRnjuG`|B&Z%fTRSC}$6p$i^i2FqUBSN{9 z1vq!w74=8OL;w>&bI7@V2sk~nYyjy4WZz~d3|;if#4r=&WiMR$PLgbRj0fJL?DjBm z?CN~Lb0Eu~BwS4?4TgYVD(&`BkaU4ed6Q|={QIdpU&&^i zQv`3!O}{#auF+Kx;=j<^bJ^uvf(Q?-Y6v0!UY0P|{82F5fh)m6D}ke!3X%n(ATXP* z(%j60>c7DY_`=o7ub@@QMl;VTO$Ay}6fKwomQ8v2>uJCB_Vcn;adIybA&?Ebk4$3o z)0wcsUCUK@Id@h8MP;d zqn>MHsV~g4SO#J$#B0hYXqiN-7eIZ(_p87h^W(v!e{!<8eVjEkJCcp)nnq(j+#Z#= 
zqmPJ6Vja?HtpFs`IrsW_YLauncvB{gH%%-I`=)S#p&&BR0R#aoJWp^E){h}lu-0ng zK<&_+jKR-uQMgt+xOU~ks^q9AcbU!*YEz(o0B_D@26nDR!kj@l^|u=p()1n$cKr!t z_NX@CSOXT&$XRk(;6jbvl~|iQ_DsG$YK~`{>b{kv02!Zc8U2mGfG0hZ7@n1u0+Mcs z_(^fUGkZ|AT%!fdI&KFVkxMsCfM_@Cm1cmR%*A>dJJxmd)=7z=<3&G1*EVcaHe2eC zJbzFlJm&NHj!X=0TJSoQ$~Zzk?yJyP*##6FG+3VtEdDOS*k1K9lW51=4=>&>sE*`y z)ku&+M<*oS8-{`e5->~ zp3#qd;6{zG7g$gq@7IwLbvzPb>6r zt&!|TGV4GFOP6~Jt7VQu==9@tfLE4D%pJ2G$1l!Zr9Dhj#5Qk=fY zs7$1QmVra!)GFHpPI!lD&h=JwyUyZ6C!hL7dvBP97J{csXfLlF6>tYWyG=COeolOo z;msWx=fgRGZK^j$vg7$cgE-Gb5i1|4w-vxy`<7r2qk>;8FEp4Sr>()}|Amz9WO-@! zHKOhGI!Xzv_MCJ#FVd)zGMhjiuI%@eDa)0=UjmGv#YaHPya+tPBwKa%eL^<)4Y&*Y z2kUhU+Vs96P1lAVQsX?YC{atjleFj;8VzVOUqU+1nqfrGE`iz|_7A|}8uhi$rTPwP z#QGKSxuV$1ur3&BjkOFHWWYpLD21RJfrxo3nc9rlJcIIA)V5ABB$w5seu z#D`7H>DF|@lxZGlEQHPq1L7=?2ECPF!$ttIgz5N9CB$4;3Dh*})!Y#X49vo_TM~xC z&5-Kl2teM*s@NSKqX{|8E&g6uiMHGk1`aaHz_Xg;Nr(NPUl_mY&XZ`q=q$OyV5q ztGG-#O2Drj6DD2XiBZgGsLXw1o*DL#O}Kt}XABi=NW2{$%fYU9NmFib&x#)WN`MpE zl7-9X5TB)Z{G#CZectPJSY=cj#^HG>ztI`az>Js#3ZVu!ftHheI4!s@(@|Zv#h3+_ zx_MBt3xLWQOTq4rx}A4Y**BBT92sAOTl$bRV;!o40zWqpjR$79@u!kW@*oU%=T!;4 z^%PkZuBLV%Rq*v%7ts84_r+PVQ0C^Kz_qi^0hqnn>{jRvb*KlLlU3MBG+xtj=t+{0 z!!Js{?B~lVQch!3+IU|NmFn>%P<&`q4(T`1v~gR>JN%u?@oik0&7!kKH*ZE%?iCnK zgBOWCNQ|qwRZyYM>(B;l6^%p64$*29Mch z6~4@|E3CjZHdJ>+LL?0_tE<}c!|3qDz=apB*b$2Z6O7TAXrTZgS#`NpDut26xG_GM zdWL;b7Kgs$wFTHC$I%e11e*D)>l#`b6)P$Dqu|oyhKcT4RbA7B^z@9ee8-H}0L^-vE&YgtA6JO~G@5+f3d1Oik6z(B+> z9;7Jlbaec+?PS`Z2WcNk7G@#nhB^0Ci0Cy&X$Fp=Z%oY5QHod5E|6(naG}ew@^Syx zqN|UR-Nc&RkEj1^c+z&YK=-UE`kiRm|FO{T7;Ck|x<&V0e~pXnuz7dv@tBZi1rvA^y#)y%T}$sbdO^9yJQaq4sz+h9V zKHx!THt~d@|B;E&CP$5~jNW6CpOPYhR$bGP1QRNf*5#zTe@+v7XuFyf(h*-Tv4Zld zq%mw%3OS5 z-N6!b_!f3#v5e1Dt(;|&2-LCY#!FD&s7_;f$P8bzYKd`qA?k*TU!5U~6L4~iA%oi) zDnHw6mg-$9;GZ4W2HI@Wf>K?b_$UU;g<|(21uRnYJ&HiaJ&3}>r|SGeN(Gg8o3uPu z>^Bl&ZB^c_wcJk)5MWfzZYQUVhZdO-cFmvT9u?N0ahkqjN#A1lz7fX;)6?)Wjc|kp zj?OkRGhlD9j4m916QPPtH62YdiTTBr9GNA41&EIf_6NPMvO~-e8(0m$pnx+E+Df&a 
z9vAst8nY8fdjIhJhYaTO=vBH7_p3Kt)uhWXM5eQLbTqk=yboGUGs^lHtYtZ@5|c_v zllsHdolfO#D)4UQfPc_d$5)}D`@tA@o|0gdA8WF~d%OQ~J+GKEcA0PLaYV~_+k_KC zGVKGEB3=Nijm9kT3j&iUS{9uJ)rDFtTijZJZ&!)KRmltoEyZ(}6h*0LRMym`R}kWC z@jJaYb)ZTq(w@m*YZDqmplXXD1KU`{m;(x~th?*+WhedYnfwJwr3C4@l?o6kkv%a0 zHAWh}>1@}{RjFfLFvv)q&_3}P8~@hr`zsnAhg0SB4I)lPMhC<`r0qO6WdWtCy8fDe zO4%X>L?W*N)G#vUUJHG5ftdP(CAg$iNM?F6JdUJ7@%wlG2i6R`;3^ImcMB$?Kak0M zrkTD7>?Q}F7zYU;m&O2^>q(*+>L>(m=i8l}H8+IEm@CYQCz{nnCo}_GC%nk} zuUje0Z0t!7Eht6pq*@-b#^)@5fxE<{ZA*6|-=j~ne$?X~eLXgpA_D)+SvmArF4aWY z8%a{rK|**NbtsVxC{Eb_^ZF_3Z3p?~Sy)uihY=#vXv6rnak%tpSeQXa5O65-%5Enl ziH~<4%8x&+R(wH%mV>QN8n9wCTchm&nQiy>!?`nQj}3Q)5`csEtAiB)#=`FpXM36r z#DNNTwIjSF+6gt%?QF$*AEq@jy7(o}3N~E>xex8167g3fBlX8$MiA8#Odr|rmzYwD z0-6sf1*Hc{hj*^bt4TJI5&N0%bd#+jWgKtmCZ>-e@L6WBJ<>PgwE&Jm;e3X9`3H)e zNoivYpPf#9z1fs`Ans|n7*(lu!|3zdV_{8*VBg3B@mtzM=g=6Gs`~vO%&f_t{aq#% zwx`~O(K;lGn3$2Zuz~Eo`+9R&b0++b)3-5<O|$ z+fxM6?2SHb{``(h1(OfQ&(CKZ8-52+aZ+st81Am3O+M@}{DIP;naJc3PQG=kZ&%VVc#GNL_Q(qtkCZV&Qj z7I@hf4cJ2^lu>G;*gc-E04WalpXT0z52i+(iz^La!@Cvxt{%cJ6u zyvvHFOQ(ZVa7A*PyU^Q9aWY}$9QNSaAx8V55nN$Bm8mL%Ylo|N&wMR34340HstnBJ zlZILp&YL=!m(2m{S>U*tIuw4)3q(77@7_LWG45By!^K*xAszbJ%Y1EY1jDH3%oA{N z9YgP(aB!>sA*$+^`iW4i?e6s`;_a(VP4B)wIJNQQL%m3p)9k`G<9o-Ec*9g!_&al7CTGx4 zNRxs+-o?11#uo4M6XA1unw3>VkF=*R)c;!K_9gFz+Uskm`plgeR&*}jupa3zi3i_Z zhklOnr7(%K$5<*hPI*Sbv>;1%{=@-GA zrM*JkldIdDxuZrqNEfKqZ|WgC@Anis>M5)(;@hq8NZY+FJMD_eVBZ>=hyyOJ@AA(h zcR2|Kl=QK!X#>It{WJj$S48(d5*%|lcz;_?Ys16uNO2;zNCIZ3{JwwaM(5;%!dOFV z_ndQUky!ZnR#(C?YWZcS{oe>23`OcgFs8q{-|7xtKWKU>zRZ$8gT_X+OBPiXsb3GA zFJpVAj7okeJkYS(Z@j!WxRJbcDYuiM_PJmrZ%&o)5oSU!r)WJ;K!IX*EO>hKl-JuXLE~LOdP_$R2$yE_QCq?Rh_MSp!1B! 
z;v1-PilJ*+ZS7c(!ZCsoSZynCH$;IikX-&3B3g5TXU~_n;mZB5*q6iUPJlZ^s$&;4 zl1#3PBLph>i#h%@_Y7~?#3%E;uTtJNX+0El?-!kwxJ%< z%Zy-91PGa9u3FEje8#hwSV2zwrPX}tw7*O2ioH)Q({yS}`A~=y8y#J&p$MB^_=Shc zTEWxNqZFaoXtRqAR^mgH7vnGfaObB1=EI<%<{uFjy-Sn zQBHQ{Nnp6{rs0KnCX`NyV{&jOx*}_d^4yEB<6M@TZ^HNJnxe2nx#;jQnGq6GaEUmc zESrzh8&}J?orE{JIHgf{o6xT!=E|~SQMgAyWVtI?r}Dg1rnNGY@2^~%_CE=U0gjP* zm8}={-SW1mv0S9B+=WvZ>IP{c>KZJqqIFJLvO?*ru0X`Zqi2MuY1mcY=eArdU1ml$ z+n$O{(aEpKtT{XIg^8`I0`CnZGxADH0*->AWJ8db$a$qnoK<@mTMF-eu}==W0QY+v zVFxz`foEeL{^>+h96f(Z2PR{}Lo#=x32@NdS4qOTmuxdoIc_B>yps}}ZZv!!JUi!0 znXERGGM6vl#yR#$y65r4cc1rC!qvMQ@b>XZxk0=Q)@deAuSm>TZlq7>HvWjJ4Q9aS z>(p8)-Ci6Wmuia4YHe}u{tbEj45qE>y~fPuNcRy>INuK2x&qhEBSz)dLZQTDEpIWq zrOkLjhuJbP*3yQp5(KxwUL&s=++{Kd!ALM{l;)aVZrWg1G{L^1usRW5&kKNTN2i+H z1XWZ;EzN+)BEm917p?V{?B1!<{gW#YyVO;ol15)O$vJi3)D^QneuLOOZEAGKe|~&B zmZu--palF4fP|TXg~Y>9!GfgMJgio_ntC7})NF7ra-{-O;4G3u*zyln_bJ6n$)|xMAp?(Ou3iY%9Ba&@M zb-?1+IsphNZ2_rsGf#Ifnz2yq4j9`c&WO97B0`7=2Ds7Z%2i@k3JF;3#mG_sMQ^Y6 zx|1|rbXaw3X)>4PKe1WU&k~n(e;WF5Y2WA&*#E4qgSH|sqSceEog?`Bz1aB#_&o)? zMUuaXaz2Hq`B+JO{FEBD_vg7lB62EMR+InjQNm9myg{mS5ZPxr$dKn?uf*9pg4kW@ zjQM6Ku|xSWag;bU06H=oK#f1#gcM|0C3ZUV&#*!GbZfpsd!;~`>R}5L+B1n&T-iJ^ zVW#}|6|mW8c0wlZUk$cfLB2lOkqZhTn&N|3XPXQ?KVPp8GL$V-M{0A**8K;fvMmM4OJx8(ca52^f`St|6@^w%zis?YCh|Hog?3#78Z zCECL|t{b;qsOJe@FHbf((A84HkfqaDa94(oZVxibPR1vLM6yD&6-Pk(W2aaWXFQKy z#?>TNc^`^exBP-`v;9QoUH6(;*bz9r%gg#Zlh_U8@fW=JSSJE)gZXaQdc@z!{+1aR9@Gg+HPhDB)Cjl!Gj75E+DG+CjZ>Y)J6#Wa@?l*8gFadX z6&NYqd>r4+++u~LM97o;)^QoX{!Y{0uz+Tso9`-gwwAo0zCfeLWsW7ZDC@<&r^{nL zHK2xHdQ^kixU7mz7g!qc0MP*QJNmc#QtR(XU%xfE+inc89r9R=5uS;Z2WWrlSTeL2 z!;B#~jK^6{IL}WmiD^?q^&>U$>y-s+KEI*2cwCADq3>&T%?leTjk{yW_wf>|c8kBK z_ZR~=kdI+7Lf8-aQqSoLTB(0DK)jpLQlLhUer$&qpV+_P zu+`ol*m&cEZ?)A{l7;`LyrjI?%| zWxMZ2L#2?;xN#uFBahB8q5xG@XDQLU(>AF_`@60cius*c;3|CT@zrmfO^O0j{8o(K ze@dg(!B?^sRn&Q~RpNV$h^s1;=T%n$D+g=9P-T6L# zbXgeJagW);Iyhf6ASMA~#C8O#d-gKVzQU5ce~h^+^xFEPAB?PZlkYuHFBT>EulxBs zIY+V?TpT}CT2H2t2eFE}ZEr`^3{KtZFmqqe)k-L>z><2`YL73z%wqEv7!%yf{cC)? 
zm$I5tYRUEGZlA~Me-h>Qxn^K#dyiHXXFlPHqRn}dr1hb!Q)u%4iVNuNV!CS-exEVe zxjMBT{JoOS^E@b}S;L0d#YlI-<8-}{uc>1cO6BFN01VQ3et~~HJbAxAc=$UqfrghR z;}BP?Ec$t~xBq@{bb1ic`|cP?CzOkCH{6 z85s5giLM#8w{g?yMg6p%us+n83L>GA$Bv($#v_$nlg1)kN*C(j(m(C<1UDVm_8fW6 zY6s&D0^A63n+5_$ukTtkb5oXr(q={Cec}mlDxOyugG?B^4k+o*y6jQ2Leg+0ci2(| z*W0!bXp#rXh$=G|Pg*mWXtKi=Db=%x26uqk>?k)fLIG2nrpK`B&r<6xaQ+`4?in<+ z_z@$S{hn+qkYybm8MdAJdTYbD1m%GoaZs(5<=GkT9gc5CJJ+aA3D*%F4qTe0hH04+I<)7h^>aOlsEcfdFpo+k!jiiP;F8UoE z?r`~h{`1GS+j8&wcp)_{Cc3)r+ujmg-4dNdHBfvqRC+R0e0+Lpx~n?hY4rcP6KyAY zZTlMid`Dwb>LRJ4@|$q~!V$THq-DKyxPi#d_;3*YqM0pw9%K*}NuyZV$@S-cdL-Uq zS*+P!IuE_o#|@5wDv`r$gIVcSl;ndgnF>AG-qyTH{Kg5scwf9{Z|~~Kr2=U^ZE|`r{2fRBxCjcC+)i{V@W^#v2%gIvx zro#NyDw9cif+aK?@)<}!i1Bx<0vi`d2oQg6<8$<|_g_P4P0SQ7Gbd z#V&bAuoF~Y5+k=t*#Z&ULWbfVOw?W#@r<-c=#S zZOh&cjCjSE6vNoe4_Pxgb>Z6larfC1w10^u^CF(wOEhGT_16Z6J9=bMpSz%ye@Ql8 z;g>?7_WL!-zUHXo$T=j3o;S{X&DX{36rPZ?tP4}q3DT>#)Ov8}Q%I{s{QllURL6TSljlGEn zNY}yWgSvh1L{^IELUzJ4UA}zk4=MM!mDjUSZ&^SSvSwm%J1i$92$lR_JlnjV?FvG-_c1i&8gsH>m7{_u)d5E)$ToBNfJ zuTF9NJTOP4*LJ*>?ENt#Lpg6SL>0hb#k$mT^5}4*se3sKXYNNcvHr=Xg0dC)rW0r5 z;TBfrxFC!6N=}1CjM`J-tq)|v)xn0MW{SN>C)Kv5?$6Obb{Zd$+&GCb&ORwCNzwf$ z6yKCe9kXltNeB&7?`OFrBl;YMI6Fz#51;Wr!TAG)iJWe!<>9>ju?xix2*JPnP7f6m zM?!!8QcU7Xf?6Ou?QPkx>B`i^)llY1B z{$@b`hzL{sdSnyy=e%8y7RR>_fvc4mY={1W|Id1?kV@7vsSvi`3iaE!>O^Cu)&$nw z&`2-u*M#megs_b1QoovJ_V`5{v(_U>e=Y*dEv{tzo&(4g`c8xK1UCKlFy_rgF^@3X z+wrZ5f%7A<#_l}O*}=$qWpzSj5}%v;tvg11*CBnz2%JA6DcDQLT&NHT*XL`KBmAVo zgPF3HFZ?e}-Bu1Z`lZ%t!m;{~(bfsTbs5|qrASvNoRpnBsyb06Tc+HT?f z13h;;r(A!L444`e!wLnwi{hVYQF!(T*Kr|R>p3Zb^UR=!-WdCuQ*gB6^5ahpYIzPG zThV)*i42y_oR7Nn!iN}$WPi4%=^E&{2-RF$2~$0^l*7!lC(2JS*le+l#=VOdu(VF~ z__ROF0}rD5P)kHCGHNy;?-UZI{UWh*beb55{2IZN*8S3Ii>Cc2J_f!g%IfL~QB!&4 zfe85H`T1|Bv)xDeN4~<|X+6rUhDwdl+N>I_r3{<(ULpX^XnclrPs0fnZOiyE9lXBM z3VRfY=5Z$nZ>xrg=e1cFu6wd-V4cpRLY{G-FwL1B`>F%?N<^H#Z zAq7N;K9+-F8MuJE0an8a8R6x z^HaYJU563>!;mYnf@3}o%Es5~`$Z**>ERehobJ+}?5zy1hm0tN$#E&sn|l0HELQAz zeb;Mn)`G()l8QV8Bnj02kN4sK$Nm5Si2u?Y_&?YGh5cau5BB?ipr4974DA0rLj2dt 
N|Lv{+q#ywBzW~Kh^PT_z diff --git a/src/agent/docker-rest-agent/intergration-test/test.py b/src/agent/docker-rest-agent/intergration-test/test.py deleted file mode 100644 index 7a314a2ab..000000000 --- a/src/agent/docker-rest-agent/intergration-test/test.py +++ /dev/null @@ -1,86 +0,0 @@ -from requests import put, get, post -import base64 -import docker -import json - -client = docker.from_env() - -with open("msp.zip", "rb") as node_msp, open("tls.zip", "rb") as tls, open( - "block.zip", "rb" -) as block, open("peer_config.zip", "rb") as peer_config, open( - "orderer_config.zip", "rb" -) as orderer_config: - data = { - "msp": base64.b64encode(node_msp.read()), - "tls": base64.b64encode(tls.read()), - "bootstrap_block": base64.b64encode(block.read()), - "peer_config_file": base64.b64encode(peer_config.read()), - "orderer_config_file": base64.b64encode(orderer_config.read()), - "img": "hyperledger/cello-hlf:2.2.0", - "cmd": 'bash /tmp/init.sh "peer node start"', - "name": "cello-hlf-peer", - } -print("-" * 20) -print("Test creating a node") -print() -n = post("http://localhost:5001/api/v1/nodes", data=data) -print(n.text) -txt = json.loads(n.text) -nid = txt["data"]["id"] -print("-" * 20) - -print("Test starting a node") -print() -data = {"action": "start"} -response = post("http://localhost:5001/api/v1/nodes/" + nid, data=data) -print(response.text) -print("-" * 20) - -print("Test restarting a node") -print() -data = {"action": "restart"} -response = post("http://localhost:5001/api/v1/nodes/" + nid, data=data) -print(response.text) -print("-" * 20) - - -print("Test updating a node") -print() -# TODO(dixing): use different commands & configuration files -with open("msp.zip", "rb") as node_msp, open("tls.zip", "rb") as tls, open( - "block.zip", "rb" -) as block, open("peer_config.zip", "rb") as peer_config, open( - "orderer_config.zip", "rb" -) as orderer_config: - data = { - "action": "update", - "msp": base64.b64encode(node_msp.read()), - "tls": 
base64.b64encode(tls.read()), - "bootstrap_block": base64.b64encode(block.read()), - "peer_config_file": base64.b64encode(peer_config.read()), - "orderer_config_file": base64.b64encode(orderer_config.read()), - "cmd": 'bash /tmp/update.sh "peer node start"', - } -response = post("http://localhost:5001/api/v1/nodes/" + nid, data=data) -print(response.text) -print("-" * 20) - -print("Test stopping a node") -print() -data = {"action": "stop"} -response = post("http://localhost:5001/api/v1/nodes/" + nid, data=data) -print(response.text) -print("-" * 20) - - -print("Get status of a node") -print() -response = get("http://localhost:5001/api/v1/nodes/" + nid) -print(response.text) -print("-" * 20) - -print("Test deleting a node") -print() -data = {"action": "delete"} -response = post("http://localhost:5001/api/v1/nodes/" + nid, data=data) -print(response.text) diff --git a/src/agent/docker-rest-agent/intergration-test/tls.zip b/src/agent/docker-rest-agent/intergration-test/tls.zip deleted file mode 100644 index b7f4a2c9f4268c9c59edf11c1ad5dd482b2f6404..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 158 zcmWIWW@h1H0D*!?u|O~bO0Y1xYJLGBAIcS%_82&nfm=M!gA*NxN#L5Oz!w7_4K-vz(VE_P%t`{!= diff --git a/src/agent/docker-rest-agent/pip.conf b/src/agent/docker-rest-agent/pip.conf deleted file mode 100644 index 89c3b57e6..000000000 --- a/src/agent/docker-rest-agent/pip.conf +++ /dev/null @@ -1,5 +0,0 @@ -[global] -index-url=http://mirrors.aliyun.com/pypi/simple/ - -[install] -trusted-host=mirrors.aliyun.com diff --git a/src/agent/docker-rest-agent/requirements.txt b/src/agent/docker-rest-agent/requirements.txt deleted file mode 100644 index 380071820..000000000 --- a/src/agent/docker-rest-agent/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -docker>=6.0.1 -Flask>=2.2.2 -gunicorn>=20.1.0 -gevent>=22.10.2 diff --git a/src/agent/docker-rest-agent/server.py b/src/agent/docker-rest-agent/server.py deleted file mode 100644 index 2ccd6fd3c..000000000 
--- a/src/agent/docker-rest-agent/server.py +++ /dev/null @@ -1,211 +0,0 @@ -from flask import Flask, jsonify, request -import docker -import sys -import logging -import os -import ast - -logging.basicConfig(level=logging.INFO) - -app = Flask(__name__) -PASS_CODE = "OK" -FAIL_CODE = "Fail" - -docker_url = os.getenv("DOCKER_URL") -storage_path = os.getenv("STORAGE_PATH") - -client = docker.DockerClient(docker_url) -res = {"code": "", "data": {}, "msg": ""} - - -@app.route("/api/v1/networks", methods=["GET"]) -def get_network(): - container_list = client.containers.list() - containers = {} - for container in container_list: - containers[container.id] = { - "id": container.id, - "short_id": container.short_id, - "name": container.name, - "status": container.status, - "image": str(container.image), - "attrs": container.attrs, - } - res = {"code": PASS_CODE, "data": containers, "msg": ""} - return jsonify({"res": res}) - - -@app.route("/api/v1/nodes", methods=["POST"]) -def create_node(): - node_name = request.form.get("name") - env = { - "HLF_NODE_MSP": request.form.get("msp"), - "HLF_NODE_TLS": request.form.get("tls"), - "HLF_NODE_PEER_CONFIG": request.form.get("peer_config_file"), - "HLF_NODE_ORDERER_CONFIG": request.form.get("orderer_config_file"), - "platform": "linux/amd64", - } - port_map = ast.literal_eval(request.form.get("port_map")) - volumes = [ - "{}/fabric/{}:/etc/hyperledger/fabric".format(storage_path, node_name), - "{}/production/{}:/var/hyperledger/production".format( - storage_path, node_name - ), - "/var/run/docker.sock:/host/var/run/docker.sock", - ] - if request.form.get("type") == "peer": - peer_envs = { - "CORE_VM_ENDPOINT": "unix:///host/var/run/docker.sock", - "CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE": "cello-net", - "FABRIC_LOGGING_SPEC": "INFO", - "CORE_PEER_TLS_ENABLED": "true", - "CORE_PEER_PROFILE_ENABLED": "false", - "CORE_PEER_TLS_CERT_FILE": "/etc/hyperledger/fabric/tls/server.crt", - "CORE_PEER_TLS_KEY_FILE": 
"/etc/hyperledger/fabric/tls/server.key", - "CORE_PEER_TLS_ROOTCERT_FILE": "/etc/hyperledger/fabric/tls/ca.crt", - "CORE_PEER_ID": node_name, - "CORE_PEER_ADDRESS": node_name + ":7051", - "CORE_PEER_LISTENADDRESS": "0.0.0.0:7051", - "CORE_PEER_CHAINCODEADDRESS": node_name + ":7052", - "CORE_PEER_CHAINCODELISTENADDRESS": "0.0.0.0:7052", - "CORE_PEER_GOSSIP_BOOTSTRAP": node_name + ":7051", - "CORE_PEER_GOSSIP_EXTERNALENDPOINT": node_name + ":7051", - "CORE_PEER_LOCALMSPID": node_name.split(".")[1].capitalize() - + "MSP", - "CORE_PEER_MSPCONFIGPATH": "/etc/hyperledger/fabric/msp", - "CORE_OPERATIONS_LISTENADDRESS": node_name + ":9444", - "CORE_METRICS_PROVIDER": "prometheus", - } - env.update(peer_envs) - else: - order_envs = { - "FABRIC_LOGGING_SPEC": "INFO", - "ORDERER_GENERAL_LISTENADDRESS": "0.0.0.0", - "ORDERER_GENERAL_LISTENPORT": "7050", - "ORDERER_GENERAL_LOCALMSPID": "OrdererMSP", - "ORDERER_GENERAL_LOCALMSPDIR": "/etc/hyperledger/fabric/msp", - "ORDERER_GENERAL_TLS_ENABLED": "true", - "ORDERER_GENERAL_TLS_PRIVATEKEY": "/etc/hyperledger/fabric/tls/server.key", - "ORDERER_GENERAL_TLS_CERTIFICATE": "/etc/hyperledger/fabric/tls/server.crt", - "ORDERER_GENERAL_TLS_ROOTCAS": "[/etc/hyperledger/fabric/tls/ca.crt]", - "ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE": "/etc/hyperledger/fabric/tls/server.crt", - "ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY": "/etc/hyperledger/fabric/tls/server.key", - "ORDERER_GENERAL_CLUSTER_ROOTCAS": "[/etc/hyperledger/fabric/tls/ca.crt]", - "ORDERER_GENERAL_BOOTSTRAPMETHOD": "none", - "ORDERER_CHANNELPARTICIPATION_ENABLED": "true", - "ORDERER_ADMIN_TLS_ENABLED": "true", - "ORDERER_ADMIN_TLS_CERTIFICATE": "/etc/hyperledger/fabric/tls/server.crt", - "ORDERER_ADMIN_TLS_PRIVATEKEY": "/etc/hyperledger/fabric/tls/server.key", - "ORDERER_ADMIN_TLS_ROOTCAS": "[/etc/hyperledger/fabric/tls/ca.crt]", - "ORDERER_ADMIN_TLS_CLIENTROOTCAS": "[/etc/hyperledger/fabric/tls/ca.crt]", - "ORDERER_ADMIN_LISTENADDRESS": "0.0.0.0:7053", - 
"ORDERER_OPERATIONS_LISTENADDRESS": node_name + ":9443", - "ORDERER_METRICS_PROVIDER": "prometheus", - } - env.update(order_envs) - try: - # same as `docker run -dit yeasy/hyperledge-fabric:2.2.0 -e VARIABLES`` - container = client.containers.run( - request.form.get("img"), - request.form.get("cmd"), - detach=True, - tty=True, - stdin_open=True, - network="cello-net", - name=request.form.get("name"), - dns_search=["."], - volumes=volumes, - environment=env, - ports=port_map, - ) - except: - res["code"] = FAIL_CODE - res["data"] = sys.exc_info()[0] - res["msg"] = "creation failed" - logging.error(res) - raise - - res["code"] = PASS_CODE - res["data"]["status"] = "created" - res["data"]["id"] = container.id - res["data"][ - "public-grpc" - ] = "127.0.0.1:7050" # TODO: read the info from config file - res["data"]["public-raft"] = "127.0.0.1:7052" - res["msg"] = "node created" - - return jsonify(res) - - -@app.route("/api/v1/nodes/", methods=["GET", "POST"]) -def operate_node(id): - container = client.containers.get(id) - if request.method == "POST": - act = request.form.get("action") # only with POST - - try: - if act == "start": - container.start() - res["msg"] = "node started" - elif act == "restart": - container.restart() - res["msg"] = "node restarted" - elif act == "stop": - container.stop() - res["msg"] = "node stopped" - elif act == "delete": - container.remove() - res["msg"] = "node deleted" - elif act == "update": - - env = {} - - if "msp" in request.form: - env["HLF_NODE_MSP"] = request.form.get("msp") - - if "tls" in request.form: - env["HLF_NODE_TLS"] = request.form.get("tls") - - if "bootstrap_block" in request.form: - env["HLF_NODE_BOOTSTRAP_BLOCK"] = request.form.get( - "bootstrap_block" - ) - - if "peer_config_file" in request.form: - env["HLF_NODE_PEER_CONFIG"] = request.form.get( - "peer_config_file" - ) - - if "orderer_config_file" in request.form: - env["HLF_NODE_ORDERER_CONFIG"] = request.form.get( - "orderer_config_file" - ) - - 
container.exec_run( - request.form.get("cmd"), - detach=True, - tty=True, - stdin=True, - environment=env, - ) - container.restart() - res["msg"] = "node updated" - - else: - res["msg"] = "undefined action" - except: - res["code"] = FAIL_CODE - res["data"] = sys.exc_info()[0] - res["msg"] = act + "failed" - logging.error(res) - raise - else: - # GET - res["data"]["status"] = container.status - - res["code"] = PASS_CODE - return jsonify(res) - - -if __name__ == "__main__": - app.run(host="0.0.0.0", port=5001) diff --git a/src/agent/k8s-rest-agent/Dockerfile b/src/agent/k8s-rest-agent/Dockerfile deleted file mode 100644 index 88e3750e0..000000000 --- a/src/agent/k8s-rest-agent/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM python:3.8 - -COPY requirements.txt / -COPY pip /root/.pip - -RUN pip install -r /requirements.txt - -COPY src /var/www/server -COPY entrypoint.sh / -COPY uwsgi/server.ini /etc/uwsgi/apps-enabled/ -RUN mkdir /var/log/supervisor - -ENV WEBROOT / -ENV WEB_CONCURRENCY 10 -ENV DEBUG False -ENV UWSGI_WORKERS 1 -ENV UWSGI_PROCESSES 1 -ENV UWSGI_OFFLOAD_THREADS 10 -ENV UWSGI_MODULE server.wsgi:application - -WORKDIR /var/www/server -RUN python manage.py collectstatic --noinput - -CMD bash /entrypoint.sh diff --git a/src/agent/k8s-rest-agent/entrypoint.sh b/src/agent/k8s-rest-agent/entrypoint.sh deleted file mode 100644 index 9325749ff..000000000 --- a/src/agent/k8s-rest-agent/entrypoint.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -if [[ "$RUN_TYPE" == "SERVER" ]]; then - uwsgi --ini /etc/uwsgi/apps-enabled/server.ini; -else - if [[ "$RUN_TYPE" == "TASK" ]]; then - celery -A server worker --autoscale=20,6 -l info - elif [[ "$RUN_TYPE" == "BEAT_TASK" ]]; then - celery -A server beat -l info --scheduler django_celery_beat.schedulers:DatabaseScheduler --pidfile=/opt/celeryd.pid - fi -fi diff --git a/src/agent/k8s-rest-agent/pip/pip.conf b/src/agent/k8s-rest-agent/pip/pip.conf deleted file mode 100644 index 1c12d133f..000000000 --- 
a/src/agent/k8s-rest-agent/pip/pip.conf +++ /dev/null @@ -1,5 +0,0 @@ -[global] -index-url=http://mirrors.cloud.aliyuncs.com/pypi/simple/ - -[install] -trusted-host=mirrors.cloud.aliyuncs.com diff --git a/src/agent/k8s-rest-agent/requirements.txt b/src/agent/k8s-rest-agent/requirements.txt deleted file mode 100644 index a53c40ac7..000000000 --- a/src/agent/k8s-rest-agent/requirements.txt +++ /dev/null @@ -1,18 +0,0 @@ -Django>=3.0 -uwsgi -enum34 -djangorestframework -holdup>1.5.0,<=1.6.0 -drf-yasg<=1.17.0 -swagger_spec_validator<=2.4.1 -psycopg2-binary -celery<5.0,>=4.4 -redis -requests -supervisor -django-celery-beat -django-celery-results -django-3-jet -djangorestframework-jwt<=1.11.0 -python-jwt # 需要安装,否则会出现token解码失败错误 -shortuuid diff --git a/src/agent/k8s-rest-agent/src/api/__init__.py b/src/agent/k8s-rest-agent/src/api/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/admin.py b/src/agent/k8s-rest-agent/src/api/admin.py deleted file mode 100644 index 8c38f3f3d..000000000 --- a/src/agent/k8s-rest-agent/src/api/admin.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.contrib import admin - -# Register your models here. 
diff --git a/src/agent/k8s-rest-agent/src/api/apps.py b/src/agent/k8s-rest-agent/src/api/apps.py deleted file mode 100644 index 14b89a829..000000000 --- a/src/agent/k8s-rest-agent/src/api/apps.py +++ /dev/null @@ -1,5 +0,0 @@ -from django.apps import AppConfig - - -class ApiConfig(AppConfig): - name = "api" diff --git a/src/agent/k8s-rest-agent/src/api/auth.py b/src/agent/k8s-rest-agent/src/api/auth.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/management/__init__.py b/src/agent/k8s-rest-agent/src/api/management/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/management/commands/__init__.py b/src/agent/k8s-rest-agent/src/api/management/commands/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/management/commands/test_task.py b/src/agent/k8s-rest-agent/src/api/management/commands/test_task.py deleted file mode 100644 index 9d8b32f6d..000000000 --- a/src/agent/k8s-rest-agent/src/api/management/commands/test_task.py +++ /dev/null @@ -1,14 +0,0 @@ -from django.core.management import BaseCommand -from api.tasks import example_task -from django_celery_beat.models import IntervalSchedule, PeriodicTask - - -class Command(BaseCommand): - help = "Test Task" - - def handle(self, *args, **options): - interval = IntervalSchedule.objects.first() - PeriodicTask.objects.create( - interval=interval, name="example", task="server.tasks.example_task" - ) - # example_task.delay() diff --git a/src/agent/k8s-rest-agent/src/api/migrations/__init__.py b/src/agent/k8s-rest-agent/src/api/migrations/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/models/__init__.py b/src/agent/k8s-rest-agent/src/api/models/__init__.py deleted file mode 100644 index ef4b6058c..000000000 --- a/src/agent/k8s-rest-agent/src/api/models/__init__.py +++ /dev/null @@ -1 +0,0 @@ 
-from .user import User, Profile diff --git a/src/agent/k8s-rest-agent/src/api/models/user.py b/src/agent/k8s-rest-agent/src/api/models/user.py deleted file mode 100644 index 5e9419c0e..000000000 --- a/src/agent/k8s-rest-agent/src/api/models/user.py +++ /dev/null @@ -1,42 +0,0 @@ -from django.contrib.auth.models import AbstractUser -from django.db import models -from django.db.models.signals import post_save -from api.utils.db_functions import make_uuid - - -class User(AbstractUser): - roles = [] - - id = models.UUIDField( - primary_key=True, - help_text="ID of user", - default=make_uuid, - editable=True, - ) - username = models.CharField(default="", max_length=128, unique=True) - - def __str__(self): - return self.username - - -class Profile(models.Model): - user = models.OneToOneField( - User, related_name="profile", on_delete=models.CASCADE - ) - created_at = models.DateTimeField(auto_now_add=True) - - def __str__(self): - return "%s's profile" % self.user - - class Meta: - ordering = ("-created_at",) - - -def create_user_profile(sender, instance, created, **kwargs): - if created: - Profile.objects.create(user=instance) - - -post_save.connect(create_user_profile, sender=User) - -# Create your models here. 
diff --git a/src/agent/k8s-rest-agent/src/api/routes/__init__.py b/src/agent/k8s-rest-agent/src/api/routes/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/routes/hello/__init__.py b/src/agent/k8s-rest-agent/src/api/routes/hello/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/routes/hello/views.py b/src/agent/k8s-rest-agent/src/api/routes/hello/views.py deleted file mode 100644 index c8fdde30a..000000000 --- a/src/agent/k8s-rest-agent/src/api/routes/hello/views.py +++ /dev/null @@ -1,37 +0,0 @@ -import logging -import os - -from rest_framework import viewsets, status -from drf_yasg.utils import swagger_auto_schema -from rest_framework.decorators import action -from rest_framework.response import Response - -from api.utils.mixins import PermissionsPerMethodMixin - -LOG = logging.getLogger(__name__) -APP_VERSION = os.getenv("APP_VERSION", "v1") - - -class HelloViewSet(PermissionsPerMethodMixin, viewsets.ViewSet): - - @swagger_auto_schema( - operation_summary="Hello world", operation_description="Hello world" - ) - def list(self, request): - return Response( - {"hello": "world %s" % APP_VERSION}, status=status.HTTP_200_OK - ) - - @swagger_auto_schema(operation_summary="hello world need auth") - @action( - methods=["get"], - url_path="need-auth", - url_name="need-auth", - detail=False, - ) - # @permission_classes((IsAuthenticated,)) - def need_auth(self, request): - LOG.info("request user %s", request.user) - return Response( - {"hello": "auth world %s" % APP_VERSION}, status=status.HTTP_200_OK - ) diff --git a/src/agent/k8s-rest-agent/src/api/tasks/__init__.py b/src/agent/k8s-rest-agent/src/api/tasks/__init__.py deleted file mode 100644 index 1468bf421..000000000 --- a/src/agent/k8s-rest-agent/src/api/tasks/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from api.tasks.task.example import example_task diff --git 
a/src/agent/k8s-rest-agent/src/api/tasks/task/__init__.py b/src/agent/k8s-rest-agent/src/api/tasks/task/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/tasks/task/example.py b/src/agent/k8s-rest-agent/src/api/tasks/task/example.py deleted file mode 100644 index f29ee66ac..000000000 --- a/src/agent/k8s-rest-agent/src/api/tasks/task/example.py +++ /dev/null @@ -1,12 +0,0 @@ -import logging - -from server.celery import app - - -LOG = logging.getLogger(__name__) - - -@app.task(name="example_task") -def example_task(): - LOG.info("example task") - return True diff --git a/src/agent/k8s-rest-agent/src/api/tests.py b/src/agent/k8s-rest-agent/src/api/tests.py deleted file mode 100644 index 7ce503c2d..000000000 --- a/src/agent/k8s-rest-agent/src/api/tests.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.test import TestCase - -# Create your tests here. diff --git a/src/agent/k8s-rest-agent/src/api/utils/__init__.py b/src/agent/k8s-rest-agent/src/api/utils/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/src/agent/k8s-rest-agent/src/api/utils/common/__init__.py b/src/agent/k8s-rest-agent/src/api/utils/common/__init__.py deleted file mode 100644 index 2da9e9042..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/common/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .swagger import with_common_response -from .db import paginate_list diff --git a/src/agent/k8s-rest-agent/src/api/utils/common/db.py b/src/agent/k8s-rest-agent/src/api/utils/common/db.py deleted file mode 100644 index d0ac7ca0e..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/common/db.py +++ /dev/null @@ -1,26 +0,0 @@ -from django.core.paginator import Paginator -from django.db.models import Func - - -class Round(Func): - function = "ROUND" - arity = 2 - - -def paginate_list(data=None, page=1, per_page=10, limit=None): - if not data: - data = [] - - total = len(data) - - if per_page != -1: - p = Paginator(data, 
per_page) - last_page = p.page_range[-1] - page = page if page <= last_page else last_page - data = p.page(page) - total = p.count - else: - if limit: - data = data[:limit] - - return data, total diff --git a/src/agent/k8s-rest-agent/src/api/utils/common/swagger.py b/src/agent/k8s-rest-agent/src/api/utils/common/swagger.py deleted file mode 100644 index 7c06a7478..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/common/swagger.py +++ /dev/null @@ -1,62 +0,0 @@ -from drf_yasg import openapi -from rest_framework import serializers -from rest_framework import status - -from api.utils.serializers import BadResponseSerializer - -basic_type_info = [ - (serializers.CharField, openapi.TYPE_STRING), - (serializers.BooleanField, openapi.TYPE_BOOLEAN), - (serializers.IntegerField, openapi.TYPE_INTEGER), - (serializers.FloatField, openapi.TYPE_NUMBER), - (serializers.FileField, openapi.TYPE_FILE), - (serializers.ImageField, openapi.TYPE_FILE), -] - - -def to_form_paras(self): - custom_paras = [] - for field_name, field in self.fields.items(): - type_str = openapi.TYPE_STRING - for field_class, type_format in basic_type_info: - if isinstance(field, field_class): - type_str = type_format - help_text = getattr(field, "help_text") - default = getattr(field, "default", None) - required = getattr(field, "required") - if callable(default): - custom_paras.append( - openapi.Parameter( - field_name, - openapi.IN_FORM, - help_text, - type=type_str, - required=required, - ) - ) - else: - custom_paras.append( - openapi.Parameter( - field_name, - openapi.IN_FORM, - help_text, - type=type_str, - required=required, - default=default, - ) - ) - return custom_paras - - -def with_common_response(responses=None): - if responses is None: - responses = {} - - responses.update( - { - status.HTTP_400_BAD_REQUEST: BadResponseSerializer, - status.HTTP_500_INTERNAL_SERVER_ERROR: "Internal Error", - } - ) - - return responses diff --git a/src/agent/k8s-rest-agent/src/api/utils/db_functions.py 
b/src/agent/k8s-rest-agent/src/api/utils/db_functions.py deleted file mode 100644 index aa5da3f07..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/db_functions.py +++ /dev/null @@ -1,14 +0,0 @@ -import uuid -import shortuuid - - -def make_uuid(): - return str(uuid.uuid4()) - - -def make_uuid_hex(): - return uuid.uuid4().hex - - -def make_short_uuid(): - return shortuuid.ShortUUID().random(length=16) diff --git a/src/agent/k8s-rest-agent/src/api/utils/enums.py b/src/agent/k8s-rest-agent/src/api/utils/enums.py deleted file mode 100644 index cab64c903..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/enums.py +++ /dev/null @@ -1,102 +0,0 @@ -import inspect -from enum import Enum, unique, EnumMeta - -from django.conf import settings - -ROLE_PREFIX = getattr(settings, "ROLE_PREFIX", "tea_cloud") - - -class EnumWithDisplayMeta(EnumMeta): - def __new__(mcs, name, bases, attrs): - display_strings = attrs.get("DisplayStrings") - - if display_strings is not None and inspect.isclass(display_strings): - del attrs["DisplayStrings"] - if hasattr(attrs, "_member_names"): - attrs._member_names.remove("DisplayStrings") - - obj = super().__new__(mcs, name, bases, attrs) - for m in obj: - m.display_string = getattr(display_strings, m.name, None) - - return obj - - -class ExtraEnum(Enum): - @classmethod - def get_info(cls, title="", list_str=False): - str_info = """ - """ - str_info += title - if list_str: - for name, member in cls.__members__.items(): - str_info += """ - %s - """ % ( - name.lower().replace("_", "."), - ) - else: - for name, member in cls.__members__.items(): - str_info += """ - %s: %s - """ % ( - member.value, - name, - ) - return str_info - - @classmethod - def to_choices(cls, string_as_value=False): - if string_as_value: - choices = [ - (name.lower().replace("_", "."), name) - for name, member in cls.__members__.items() - ] - else: - choices = [ - (member.value, name) - for name, member in cls.__members__.items() - ] - - return choices - - @classmethod 
- def values(cls): - return list(map(lambda c: c.value, cls.__members__.values())) - - @classmethod - def names(cls): - return [name.lower() for name, _ in cls.__members__.items()] - - -@unique -class ErrorCode(Enum, metaclass=EnumWithDisplayMeta): - Unknown = 20000 - ResourceNotFound = 20001 - CustomError = 20002 - ResourceExisted = 20003 - ValidationError = 20004 - ParseError = 20005 - - class DisplayStrings: - Unknown = "未知错误" - ResourceNotFound = "资源未找到" - CustomError = "自定义错误" - ResourceExisted = "资源已经存在" - ValidationError = "参数验证错误" - ParseError = "解析错误" - - @classmethod - def get_info(cls): - error_code_str = """ - Error Codes: - """ - for name, member in cls.__members__.items(): - error_code_str += """ - %s: %s - """ % ( - member.value, - member.display_string, - ) - - return error_code_str diff --git a/src/agent/k8s-rest-agent/src/api/utils/exception_handler.py b/src/agent/k8s-rest-agent/src/api/utils/exception_handler.py deleted file mode 100644 index e0e1d7c94..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/exception_handler.py +++ /dev/null @@ -1,34 +0,0 @@ -from rest_framework import status -from rest_framework.exceptions import ErrorDetail -from rest_framework.exceptions import ValidationError, ParseError -from rest_framework.views import exception_handler - -from api.utils.enums import ErrorCode - - -def custom_exception_handler(exc, context): - # Call REST framework's default exception handler first, - # to get the standard error response. - response = exception_handler(exc, context) - - # Now add the HTTP status code to the response. 
- if response is not None: - if ( - response.status_code == status.HTTP_400_BAD_REQUEST - and "code" not in response.data - ): - if isinstance(exc, ValidationError): - response.data["code"] = ErrorCode.ValidationError.value - response.data["detail"] = ( - ErrorCode.ValidationError.display_string - ) - elif isinstance(exc, ParseError): - response.data["code"] = ErrorCode.ParseError.value - response.data["detail"] = ErrorCode.ParseError.display_string - elif isinstance(response.data.get("detail"), ErrorDetail): - response.data["code"] = response.data.get("detail").code - else: - response.data["code"] = ErrorCode.Unknown.value - response.data["detail"] = ErrorCode.Unknown.display_string - - return response diff --git a/src/agent/k8s-rest-agent/src/api/utils/fast_enum.py b/src/agent/k8s-rest-agent/src/api/utils/fast_enum.py deleted file mode 100644 index 28b685cf6..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/fast_enum.py +++ /dev/null @@ -1,283 +0,0 @@ -import re -from functools import partial -from typing import ( - Any, - Text, - Dict, - List, - Tuple, - Type, - Optional, - Callable, - Iterable, -) - - -def _resolve_init(bases: Tuple[Type]) -> Optional[Callable]: - for bcls in bases: - for rcls in bcls.mro(): - resolved_init = getattr(rcls, "__init__") - if resolved_init and resolved_init is not object.__init__: - return resolved_init - - -def _resolve_new(bases: Tuple[Type]) -> Optional[Tuple[Callable, Type]]: - for bcls in bases: - new = getattr(bcls, "__new__", None) - if new not in { - None, - None.__new__, - object.__new__, - FastEnum.__new__, - getattr(FastEnum, "_FastEnum__new"), - }: - return new, bcls - - -class FastEnum(type): - """ - A metaclass that handles enum-classes creation. 
- Possible options for classes using this metaclass: - - auto-generated values (see examples.py `MixedEnum` and `LightEnum`) - - subclassing possible until actual enum is not declared - (see examples.py `ExtEnumOne` and `ExtEnumTwo`) - - late init hooking (see examples.py `HookedEnum`) - - enum modifications protection (see examples.py comment after `ExtendedEnum`) - """ - - # pylint: disable=bad-mcs-classmethod-argument,protected-access,too-many-locals - # pylint: disable=too-many-branches - def __new__(mcs, name, bases, namespace: Dict[Text, Any]): - attributes: List[Text] = [ - k - for k in namespace.keys() - if (not k.startswith("_") and k.isupper()) - ] - attributes += [ - k - for k, v in namespace.get("__annotations__", {}).items() - if (not k.startswith("_") and k.isupper() and v == name) - ] - light_val = 0 + int(not bool(namespace.get("_ZERO_VALUED"))) - for attr in attributes: - if attr in namespace: - continue - else: - namespace[attr] = light_val - light_val += 1 - - __itemsize__ = 0 - for bcls in bases: - if bcls is type: - continue - __itemsize__ = max(__itemsize__, bcls.__itemsize__) - - if not __itemsize__: - __slots__ = set(namespace.get("__slots__", tuple())) | { - "name", - "value", - "_value_to_instance_map", - "_base_typed", - } - namespace["__slots__"] = tuple(__slots__) - namespace["__new__"] = FastEnum.__new - - if "__init__" not in namespace: - namespace["__init__"] = _resolve_init(bases) or mcs.__init - if "__annotations__" not in namespace: - __annotations__ = dict(name=Text, value=Any) - for k in attributes: - __annotations__[k] = name - namespace["__annotations__"] = __annotations__ - namespace["__dir__"] = partial( - FastEnum.__dir, bases=bases, namespace=namespace - ) - typ = type.__new__(mcs, name, bases, namespace) - if attributes: - typ._value_to_instance_map = {} - for instance_name in attributes: - val = namespace[instance_name] - if not isinstance(val, tuple): - val = (val,) - if val[0] in typ._value_to_instance_map: - inst = 
typ._value_to_instance_map[val[0]] - else: - inst = typ(*val, name=instance_name) - typ._value_to_instance_map[inst.value] = inst - setattr(typ, instance_name, inst) - - # noinspection PyUnresolvedReferences - typ.__call__ = typ.__new__ = typ.get - del typ.__init__ - typ.__hash__ = mcs.__hash - typ.__eq__ = mcs.__eq - typ.__copy__ = mcs.__copy - typ.__deepcopy__ = mcs.__deepcopy - typ.__reduce__ = mcs.__reduce - if "__str__" not in namespace: - typ.__str__ = mcs.__str - if "__repr__" not in namespace: - typ.__repr__ = mcs.__repr - - if f"_{name}__init_late" in namespace: - fun = namespace[f"_{name}__init_late"] - for instance in typ._value_to_instance_map.values(): - fun(instance) - delattr(typ, f"_{name}__init_late") - - typ.__setattr__ = typ.__delattr__ = mcs.__restrict_modification - typ._finalized = True - return typ - - @staticmethod - def __new(cls, *values, **_): - __new__ = _resolve_new(cls.__bases__) - if __new__: - __new__, typ = __new__ - obj = __new__(cls, *values) - obj._base_typed = typ - return obj - - return object.__new__(cls) - - @staticmethod - def __init(instance, value: Any, name: Text): - base_val_type = getattr(instance, "_base_typed", None) - if base_val_type: - value = base_val_type(value) - instance.value = value - instance.name = name - - # pylint: disable=missing-docstring - @staticmethod - def get(typ, val=None): - # noinspection PyProtectedMember - if not isinstance(typ._value_to_instance_map, dict): - for cls in typ.mro(): - if cls is typ: - continue - if hasattr(cls, "_value_to_instance_map") and isinstance( - cls._value_to_instance_map, dict - ): - return cls._value_to_instance_map[val] - raise ValueError( - f"Value {val} is not found in this enum type declaration" - ) - # noinspection PyProtectedMember - member = typ._value_to_instance_map.get(val) - if member is None: - raise ValueError( - f"Value {val} is not found in this enum type declaration" - ) - return member - - @staticmethod - def __eq(val, other): - return 
isinstance(val, type(other)) and ( - val is other if type(other) is type(val) else val.value == other - ) - - def __hash(cls): - # noinspection PyUnresolvedReferences - return hash(cls.value) - - @staticmethod - def __restrict_modification(*a, **k): - raise TypeError( - f"Enum-like classes strictly prohibit changing any attribute/property" - f" after they are once set" - ) - - def __iter__(cls): - return iter(cls._value_to_instance_map.values()) - - def __setattr__(cls, key, value): - if hasattr(cls, "_finalized"): - cls.__restrict_modification() - super().__setattr__(key, value) - - def __delattr__(cls, item): - if hasattr(cls, "_finalized"): - cls.__restrict_modification() - super().__delattr__(item) - - def __getitem__(cls, item): - return getattr(cls, item) - - def has_value(cls, value): - return value in cls._value_to_instance_map - - def to_choices(cls): - return [(key, key) for key in cls._value_to_instance_map.keys()] - - def values(cls): - return cls._value_to_instance_map.keys() - - def key_description_list(cls): - result = [] - for key in cls._value_to_instance_map.keys(): - enum_key = "_".join( - re.sub( - "([A-Z][a-z]+)", r" \1", re.sub("([A-Z]+)", r" \1", key) - ).split() - ).upper() - result.append((key, cls[enum_key].description)) - return result - - # pylint: disable=unused-argument - # noinspection PyUnusedLocal,SpellCheckingInspection - def __deepcopy(cls, memodict=None): - return cls - - def __copy(cls): - return cls - - def __reduce(cls): - typ = type(cls) - # noinspection PyUnresolvedReferences - return typ.get, (typ, cls.value) - - @staticmethod - def __str(clz): - return f"{clz.__class__.__name__}.{clz.name}" - - @staticmethod - def __repr(clz): - return f"<{clz.__class__.__name__}.{clz.name}: {repr(clz.value)}>" - - def __dir__(self) -> Iterable[str]: - return [ - k - for k in super().__dir__() - if k not in ("_finalized", "_value_to_instance_map") - ] - - # def __choices__(self) -> Iterable[str]: - # return [()] - - @staticmethod - def 
__dir(bases, namespace, *_, **__): - keys = [ - k - for k in namespace.keys() - if k in ("__annotations__", "__module__", "__qualname__") - or not k.startswith("_") - ] - for bcls in bases: - keys.extend(dir(bcls)) - return list(set(keys)) - - -class KeyDescriptionEnum(metaclass=FastEnum): - description: Text - __slots__ = ("description",) - - def __init__(self, value, description, name): - # noinspection PyDunderSlots,PyUnresolvedReferences - self.value = value - # noinspection PyDunderSlots,PyUnresolvedReferences - self.name = name - self.description = description - - def describe(self): - return self.description diff --git a/src/agent/k8s-rest-agent/src/api/utils/jwt.py b/src/agent/k8s-rest-agent/src/api/utils/jwt.py deleted file mode 100644 index 54b7e0741..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/jwt.py +++ /dev/null @@ -1,30 +0,0 @@ -import logging - -from django.contrib.auth import get_user_model -from rest_framework import serializers - -User = get_user_model() -LOG = logging.getLogger(__name__) - - -class UserSerializer(serializers.ModelSerializer): - id = serializers.CharField(source="username") - - class Meta: - model = User - fields = ("id",) - extra_kwargs = {"id": {"validators": []}} - - -def jwt_response_payload_handler(token, user=None, request=None): - return { - "token": token, - "user": UserSerializer(user, context={"request": request}).data, - } - - -def jwt_get_username_from_payload_handler(payload): - """ - Override this function if username is formatted differently in payload - """ - return payload.get("sub") diff --git a/src/agent/k8s-rest-agent/src/api/utils/mixins.py b/src/agent/k8s-rest-agent/src/api/utils/mixins.py deleted file mode 100644 index 41becc97f..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/mixins.py +++ /dev/null @@ -1,12 +0,0 @@ -class PermissionsPerMethodMixin(object): - def get_permissions(self): - """ - Allows overriding default permissions with @permission_classes - """ - view = getattr(self, 
self.action) - if hasattr(view, "permission_classes"): - return [ - permission_class() - for permission_class in view.permission_classes - ] - return super().get_permissions() diff --git a/src/agent/k8s-rest-agent/src/api/utils/serializers.py b/src/agent/k8s-rest-agent/src/api/utils/serializers.py deleted file mode 100644 index 832e15231..000000000 --- a/src/agent/k8s-rest-agent/src/api/utils/serializers.py +++ /dev/null @@ -1,33 +0,0 @@ -import textwrap - -from rest_framework import serializers -from api.utils.enums import ErrorCode - - -class PaginationSerializer(serializers.Serializer): - page = serializers.IntegerField( - default=1, min_value=1, help_text="查询第几页" - ) - per_page = serializers.IntegerField( - default=10, - min_value=-1, - help_text="查询分页的每页数量, 如果为-1则不限制分页数量", - ) - limit = serializers.IntegerField( - min_value=1, help_text="限制最大数量", required=False - ) - - -class PaginationResultSerializer(serializers.Serializer): - total = serializers.IntegerField( - min_value=0, help_text="Total Number of result" - ) - - -class BadResponseSerializer(serializers.Serializer): - code = serializers.IntegerField( - help_text=textwrap.dedent(ErrorCode.get_info()) - ) - detail = serializers.CharField( - required=False, help_text="Error Messages", allow_blank=True - ) diff --git a/src/agent/k8s-rest-agent/src/api/views.py b/src/agent/k8s-rest-agent/src/api/views.py deleted file mode 100644 index 91ea44a21..000000000 --- a/src/agent/k8s-rest-agent/src/api/views.py +++ /dev/null @@ -1,3 +0,0 @@ -from django.shortcuts import render - -# Create your views here. 
diff --git a/src/agent/k8s-rest-agent/src/manage.py b/src/agent/k8s-rest-agent/src/manage.py deleted file mode 100755 index 4546cf051..000000000 --- a/src/agent/k8s-rest-agent/src/manage.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python -"""Django's command-line utility for administrative tasks.""" -import os -import sys - - -def main(): - os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings") - try: - from django.core.management import execute_from_command_line - except ImportError as exc: - raise ImportError( - "Couldn't import Django. Are you sure it's installed and " - "available on your PYTHONPATH environment variable? Did you " - "forget to activate a virtual environment?" - ) from exc - execute_from_command_line(sys.argv) - - -if __name__ == "__main__": - main() diff --git a/src/agent/k8s-rest-agent/src/server/__init__.py b/src/agent/k8s-rest-agent/src/server/__init__.py deleted file mode 100644 index 0165ba0dd..000000000 --- a/src/agent/k8s-rest-agent/src/server/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from __future__ import absolute_import, unicode_literals - -# This will make sure the app is always imported when -# Django starts so that shared_task will use this app. -from .celery import app as celery_app - -__all__ = ("celery_app",) diff --git a/src/agent/k8s-rest-agent/src/server/asgi.py b/src/agent/k8s-rest-agent/src/server/asgi.py deleted file mode 100644 index 9fadff8ce..000000000 --- a/src/agent/k8s-rest-agent/src/server/asgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -ASGI config for server project. - -It exposes the ASGI callable as a module-level variable named ``application``. 
- -For more information on this file, see -https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ -""" - -import os - -from django.core.asgi import get_asgi_application - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings") - -application = get_asgi_application() diff --git a/src/agent/k8s-rest-agent/src/server/celery.py b/src/agent/k8s-rest-agent/src/server/celery.py deleted file mode 100644 index 2393692e3..000000000 --- a/src/agent/k8s-rest-agent/src/server/celery.py +++ /dev/null @@ -1,15 +0,0 @@ -from __future__ import absolute_import, unicode_literals -import os -from celery import Celery - -# set the default Django settings module for the 'celery' program. -from django.conf import settings - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings") - -app = Celery("server") - -app.config_from_object(settings, namespace="CELERY") - -# Load task modules from all registered Django app configs. -app.autodiscover_tasks() diff --git a/src/agent/k8s-rest-agent/src/server/settings.py b/src/agent/k8s-rest-agent/src/server/settings.py deleted file mode 100644 index fe064797b..000000000 --- a/src/agent/k8s-rest-agent/src/server/settings.py +++ /dev/null @@ -1,224 +0,0 @@ -""" -Django settings for server project. - -Generated by 'django-admin startproject' using Django 3.0.7. 
- -For more information on this file, see -https://docs.djangoproject.com/en/3.0/topics/settings/ - -For the full list of settings and their values, see -https://docs.djangoproject.com/en/3.0/ref/settings/ -""" - -import os -from django.utils.translation import gettext_lazy as _ - -WEBROOT = os.getenv("WEBROOT", "/") -WEBROOT = WEBROOT if WEBROOT != "/" else "" -DB_HOST = os.getenv("DB_HOST", "") -DB_PORT = int(os.getenv("DB_PORT", "5432")) -DB_NAME = os.getenv("DB_NAME", "") -DB_USER = os.getenv("DB_USER", "") -DB_PASSWORD = os.getenv("DB_PASSWORD", "") -DEBUG = os.getenv("DEBUG", "False") -DEBUG = DEBUG == "True" -# Build paths inside the project like this: os.path.join(BASE_DIR, ...) -BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) -KEYCLOAK_PUBLIC_KEY = os.getenv("KEYCLOAK_PUBLIC_KEY", "") - - -# Quick-start development settings - unsuitable for production -# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ - -# SECURITY WARNING: keep the secret key used in production secret! -SECRET_KEY = "xdpfxz9)__^3azxs2(59$j&chmo#6&gi*pu3#wpt^$m!vff)0w" - -# SECURITY WARNING: don't run with debug turned on in production! 
-# DEBUG = True - -ALLOWED_HOSTS = ["*"] - - -# Application definition - -INSTALLED_APPS = [ - "jet", - "django.contrib.admin", - "django.contrib.auth", - "django.contrib.contenttypes", - "django.contrib.sessions", - "django.contrib.messages", - "django.contrib.staticfiles", - "rest_framework", - "server", - "api", - "drf_yasg", - "django_celery_beat", - "django_celery_results", -] - -MIDDLEWARE = [ - "django.middleware.security.SecurityMiddleware", - "django.contrib.sessions.middleware.SessionMiddleware", - "django.middleware.common.CommonMiddleware", - "django.middleware.csrf.CsrfViewMiddleware", - "django.contrib.auth.middleware.AuthenticationMiddleware", - "django.contrib.messages.middleware.MessageMiddleware", - "django.middleware.clickjacking.XFrameOptionsMiddleware", -] - -ROOT_URLCONF = "server.urls" - -TEMPLATES = [ - { - "BACKEND": "django.template.backends.django.DjangoTemplates", - "DIRS": [], - "APP_DIRS": True, - "OPTIONS": { - "context_processors": [ - "django.template.context_processors.debug", - "django.template.context_processors.request", - "django.contrib.auth.context_processors.auth", - "django.contrib.messages.context_processors.messages", - ], - }, - }, -] - -WSGI_APPLICATION = "server.wsgi.application" - - -# Password validation -# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators - -AUTH_PASSWORD_VALIDATORS = [ - { - "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", - }, - { - "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", - }, - { - "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", - }, - { - "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", - }, -] - - -# Internationalization -# https://docs.djangoproject.com/en/3.0/topics/i18n/ - -LANGUAGE_CODE = "zh-hans" -USE_I18N = True -USE_L10N = True - -LANGUAGES = [ - ("en", _("English")), - ("zh-hans", _("Simplified Chinese")), -] - -TIME_ZONE = 
"Asia/Shanghai" -USE_TZ = True - - -# Static files (CSS, JavaScript, Images) -# https://docs.djangoproject.com/en/3.0/howto/static-files/ - -STATIC_URL = WEBROOT + "/static/" -STATIC_ROOT = "/var/www/static" -MEDIA_ROOT = "/data/media" -MEDIA_URL = WEBROOT + "/media/" - -USE_X_FORWARDED_HOST = True -FORCE_SCRIPT_NAME = WEBROOT if WEBROOT != "" else "/" - -DATABASES = { - "default": { - "ENGINE": "django.db.backends.postgresql", - "NAME": DB_NAME, - "USER": DB_USER, - "PASSWORD": DB_PASSWORD, - "HOST": DB_HOST, - "PORT": DB_PORT, - } -} - -REST_FRAMEWORK = { - "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.AcceptHeaderVersioning", - "DEFAULT_METADATA_CLASS": "rest_framework.metadata.SimpleMetadata", - "DEFAULT_PARSER_CLASSES": [ - "rest_framework.parsers.FormParser", - "rest_framework.parsers.MultiPartParser", - "rest_framework.parsers.JSONParser", - ], - "EXCEPTION_HANDLER": "api.utils.exception_handler.custom_exception_handler", -} - -SWAGGER_SETTINGS = { - "VALIDATOR_URL": None, - "DEFAULT_INFO": "server.urls.swagger_info", - "SECURITY_DEFINITIONS": { - "Bearer": {"type": "apiKey", "name": "Authorization", "in": "header"} - }, - "USE_SESSION_AUTH": False, -} - -CELERY_BROKER_URL = os.getenv("CELERY_BROKER_URL", "") -CELERY_RESULT_BACKEND = "django-db" -CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler" - -JWT_AUTH = { - "JWT_AUTH_HEADER_PREFIX": "Bearer", - "JWT_PUBLIC_KEY": """-----BEGIN PUBLIC KEY----- -%s ------END PUBLIC KEY-----""" - % KEYCLOAK_PUBLIC_KEY, - "JWT_ALGORITHM": "RS256", - "JWT_AUDIENCE": "account", - "JWT_PAYLOAD_GET_USERNAME_HANDLER": "api.utils.jwt.jwt_get_username_from_payload_handler", - "JWT_RESPONSE_PAYLOAD_HANDLER": "api.utils.jwt.jwt_response_payload_handler", -} - -AUTH_USER_MODEL = "api.User" -AUTH_PROFILE_MODULE = "api.Profile" - -LOGGING = { - "version": 1, - "disable_existing_loggers": False, - "formatters": { - "verbose": { - "format": "%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d 
%(message)s" - }, - "simple": {"format": "%(levelname)s %(message)s"}, - }, - "handlers": { - "null": { - "level": "DEBUG", - "class": "logging.NullHandler", - }, - "console": { - "level": "DEBUG", - "class": "logging.StreamHandler", - "formatter": "simple", - }, - }, - "loggers": { - "django": { - "handlers": ["null"], - "propagate": True, - "level": "INFO", - }, - "django.request": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - "api": { - "handlers": ["console"], - "level": "DEBUG", - "propagate": False, - }, - }, -} diff --git a/src/agent/k8s-rest-agent/src/server/urls.py b/src/agent/k8s-rest-agent/src/server/urls.py deleted file mode 100644 index e7d0a7139..000000000 --- a/src/agent/k8s-rest-agent/src/server/urls.py +++ /dev/null @@ -1,70 +0,0 @@ -"""server URL Configuration - -The `urlpatterns` list routes URLs to views. For more information please see: - https://docs.djangoproject.com/en/3.0/topics/http/urls/ -Examples: -Function views - 1. Add an import: from my_app import views - 2. Add a URL to urlpatterns: path('', views.home, name='home') -Class-based views - 1. Add an import: from other_app.views import Home - 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') -Including another URLconf - 1. Import the include() function: from django.urls import include, path - 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) -""" - -import os - -from api.routes.hello.views import HelloViewSet -from django.conf import settings -from django.contrib import admin -from django.urls import path, include -from drf_yasg import openapi -from drf_yasg.views import get_schema_view -from rest_framework import permissions -from rest_framework.routers import DefaultRouter - -DEBUG = getattr(settings, "DEBUG", False) -VERSION = os.getenv("API_VERSION", "v1") - -router = DefaultRouter(trailing_slash=False) -router.register("hello", HelloViewSet, basename="hello") - -router.include_root_view = False - -urlpatterns = router.urls - -swagger_info = openapi.Info( - title="Django Example API", - default_version=VERSION, - description=""" - Django Example API - """, -) - -SchemaView = get_schema_view( - info=swagger_info, - validators=["flex"], - public=True, - permission_classes=(permissions.AllowAny,), -) - -urlpatterns += [ - path("admin/", admin.site.urls), - path("jet/", include("jet.urls", "jet")), -] - -if DEBUG: - urlpatterns += [ - path( - "docs/", - SchemaView.with_ui("swagger", cache_timeout=0), - name="docs", - ), - path( - "redoc/", - SchemaView.with_ui("redoc", cache_timeout=0), - name="redoc", - ), - ] diff --git a/src/agent/k8s-rest-agent/src/server/wsgi.py b/src/agent/k8s-rest-agent/src/server/wsgi.py deleted file mode 100644 index 11efb9c4d..000000000 --- a/src/agent/k8s-rest-agent/src/server/wsgi.py +++ /dev/null @@ -1,16 +0,0 @@ -""" -WSGI config for server project. - -It exposes the WSGI callable as a module-level variable named ``application``. 
- -For more information on this file, see -https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/ -""" - -import os - -from django.core.wsgi import get_wsgi_application - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings") - -application = get_wsgi_application() diff --git a/src/agent/k8s-rest-agent/supervisor/conf.d/server.conf b/src/agent/k8s-rest-agent/supervisor/conf.d/server.conf deleted file mode 100644 index f4f1ad384..000000000 --- a/src/agent/k8s-rest-agent/supervisor/conf.d/server.conf +++ /dev/null @@ -1,8 +0,0 @@ -[program:beat_task] -environment=C_FORCE_ROOT="yes" -command=celery -A server beat -l info -directory=/var/www/server/ -autostart=true -autorestart=true -stdout_logfile=/var/log/supervisor/server.log -redirect_stderr=true \ No newline at end of file diff --git a/src/agent/k8s-rest-agent/supervisor/supervisord.conf b/src/agent/k8s-rest-agent/supervisor/supervisord.conf deleted file mode 100644 index d6bf70c31..000000000 --- a/src/agent/k8s-rest-agent/supervisor/supervisord.conf +++ /dev/null @@ -1,28 +0,0 @@ -; supervisor config file - -[unix_http_server] -file=/var/run/supervisor.sock ; (the path to the socket file) -chmod=0700 ; sockef file mode (default 0700) - -[supervisord] -logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log) -pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid) -childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP) - -; the below section must remain in the config file for RPC -; (supervisorctl/web interface) to work, additional interfaces may be -; added by defining them in separate rpcinterface: sections -[rpcinterface:supervisor] -supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface - -[supervisorctl] -serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket - -; The [include] section can just contain the "files" setting. 
This -; setting can list multiple files (separated by whitespace or -; newlines). It can also contain wildcards. The filenames are -; interpreted as relative to this file. Included files *cannot* -; include files themselves. - -[include] -files = /etc/supervisor/conf.d/*.conf \ No newline at end of file diff --git a/src/agent/k8s-rest-agent/uwsgi/server.ini b/src/agent/k8s-rest-agent/uwsgi/server.ini deleted file mode 100644 index 72607ef42..000000000 --- a/src/agent/k8s-rest-agent/uwsgi/server.ini +++ /dev/null @@ -1,71 +0,0 @@ -[uwsgi] -module = $(UWSGI_MODULE) -processes = $(UWSGI_PROCESSES) -threads = $(UWSGI_THREADS) -procname-prefix-spaced = uwsgi: $(UWSGI_MODULE) - -http-socket = :80 -http-enable-proxy-protocol = 1 -http-auto-chunked = true -http-keepalive = 75 -http-timeout = 75 -stats = :1717 -stats-http = 1 -offload-threads = $(UWSGI_OFFLOAD_THREADS) - -# Better startup/shutdown in docker: -die-on-term = 1 -lazy-apps = 0 - -vacuum = 1 -master = 1 -enable-threads = true -thunder-lock = 1 -buffer-size = 65535 - -# Logging -log-x-forwarded-for = true -#memory-report = true -#disable-logging = true -#log-slow = 200 -#log-date = true - -# Avoid errors on aborted client connections -ignore-sigpipe = true -ignore-write-errors = true -disable-write-exception = true - -#listen=1000 -#max-fd=120000 -no-defer-accept = 1 - -# Limits, Kill requests after 120 seconds -harakiri = 120 -harakiri-verbose = true -post-buffering = 4096 - -# Custom headers -add-header = X-Content-Type-Options: nosniff -add-header = X-XSS-Protection: 1; mode=block -add-header = Strict-Transport-Security: max-age=16070400 -add-header = Connection: Keep-Alive - -# Static file serving with caching headers and gzip -static-map = /static=/var/www/static -static-map = /media=/data/media -static-safe = /usr/local/lib/python3.7/site-packages/ -static-safe = /var/www/static/ -static-gzip-dir = /var/www/static/ -static-expires = /var/www/static/CACHE/* 2592000 -static-expires = /data/media/cache/* 
2592000 -static-expires = /var/www/static/frontend/img/* 2592000 -static-expires = /var/www/static/frontend/fonts/* 2592000 -static-expires = /var/www/* 3600 -route-uri = ^/static/ addheader:Vary: Accept-Encoding -error-route-uri = ^/static/ addheader:Cache-Control: no-cache - -# Cache stat() calls -cache2 = name=statcalls,items=30 -static-cache-paths = 86400 - -touch-reload = /tmp/server.txt \ No newline at end of file diff --git a/src/agent/kubernetes-agent/Dockerfile b/src/agent/kubernetes-agent/Dockerfile deleted file mode 100644 index 911b5fcd4..000000000 --- a/src/agent/kubernetes-agent/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM python:3.6 - -COPY requirements.txt / -RUN pip install -r /requirements.txt -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && \ - mv kubectl /usr/local/bin/kubectl - -COPY src /app - -WORKDIR /app - -ENV KUBECONFIG /app/.kube/config -ENV PYTHONPATH /app:$PATHONPATH - -CMD python main.py diff --git a/src/agent/kubernetes-agent/requirements.txt b/src/agent/kubernetes-agent/requirements.txt deleted file mode 100644 index e98ba28a9..000000000 --- a/src/agent/kubernetes-agent/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -kubernetes<=9.0.0 -requests<=2.32.3 diff --git a/src/agent/kubernetes-agent/src/__init__.py b/src/agent/kubernetes-agent/src/__init__.py deleted file mode 100644 index 0480730a5..000000000 --- a/src/agent/kubernetes-agent/src/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# diff --git a/src/agent/kubernetes-agent/src/main.py b/src/agent/kubernetes-agent/src/main.py deleted file mode 100644 index 4efbce25f..000000000 --- a/src/agent/kubernetes-agent/src/main.py +++ /dev/null @@ -1,13 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from operations import create_node, delete_node, fabric_ca_register -from utils.env import 
OPERATION, AgentOperation - -if __name__ == "__main__": - if OPERATION == AgentOperation.Create.value: - create_node() - elif OPERATION == AgentOperation.Delete.value: - delete_node() - elif OPERATION == AgentOperation.FabricCARegister.value: - fabric_ca_register() diff --git a/src/agent/kubernetes-agent/src/network/__init__.py b/src/agent/kubernetes-agent/src/network/__init__.py deleted file mode 100644 index a5270fa8c..000000000 --- a/src/agent/kubernetes-agent/src/network/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from .fabric import FabricNetwork diff --git a/src/agent/kubernetes-agent/src/network/fabric/__init__.py b/src/agent/kubernetes-agent/src/network/fabric/__init__.py deleted file mode 100644 index a5d51955e..000000000 --- a/src/agent/kubernetes-agent/src/network/fabric/__init__.py +++ /dev/null @@ -1,283 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import os -import logging -from utils.env import ( - PEER_CONFIG, - CA_CONFIG, - CA_ADMIN_NAME, - CA_ADMIN_PASSWORD, - AGENT_IP, - FabricNodeType, - FabricImages, -) - -LOG = logging.getLogger(__name__) -CA_HOSTS = CA_CONFIG.get("hosts", "").split(",") -# Set fabric ca hosts from agent ip and user customize hosts. 
-CA_HOSTS.append(AGENT_IP) - - -class FabricNetwork(object): - def __init__(self, *args, **kwargs): - self._version = kwargs.get("version") - self._type = kwargs.get("node_type") - self._agent_id = kwargs.get("agent_id") - self._node_id = kwargs.get("node_id") - self._deploy_name = "deploy-%s" % str(self._node_id) - self._service_name = "service-%s" % str(self._node_id) - self._ingress_name = "ingress-%s" % str(self._node_id) - self._container_image = "" - self._container_environments = None - self._container_command = None - self._container_command_args = None - self._initial_containers = None - self._container_volume_mounts = None - self._containers = None - self._initial_containers = None - self._volumes = None - - if self._type == FabricNodeType.Ca.value: - self._container_ports = [7054] - self._service_ports = [{"port": 7054, "name": "server"}] - self._image_name = "%s:%s" % (FabricImages.Ca.value, self._version) - self._pod_name = "ca-server" - self._init_ca_deployment() - elif self._type == FabricNodeType.Peer.value: - self._container_ports = [7051, 7052] - self._service_ports = [ - {"port": 7051, "name": "server"}, - {"port": 7052, "name": "grpc"}, - ] - self._image_name = "%s:%s" % ( - FabricImages.Peer.value, - self._version, - ) - self._pod_name = "peer" - self._init_peer_deployment() - else: - self._container_ports = [] - self._service_ports = [] - self._image_name = "" - self._pod_name = "" - - def _init_ca_deployment(self): - self._container_environments = [ - { - "name": "FABRIC_CA_HOME", - "value": "/etc/hyperledger/fabric-ca-server", - }, - { - "name": "FABRIC_CA_SERVER_HOME", - "value": "/etc/hyperledger/fabric-ca-server/crypto", - }, - {"name": "FABRIC_CA_SERVER_TLS_ENABLED", "value": "true"}, - { - "name": "FABRIC_CA_SERVER_CSR_HOSTS", - "value": ",".join(CA_HOSTS), - }, - ] - self._container_command = ["fabric-ca-server"] - self._container_command_args = [ - "start", - "-b", - "%s:%s" % (CA_ADMIN_NAME, CA_ADMIN_PASSWORD), - "-d", - ] - - def 
_init_peer_deployment(self): - gossip_use_leader_reflection = PEER_CONFIG.get( - "gossip_use_leader_reflection", True - ) - gossip_org_leader = PEER_CONFIG.get("gossip_org_leader", False) - gossip_skip_handshake = PEER_CONFIG.get("gossip_skip_handshake", True) - name = PEER_CONFIG.get("name") - local_msp_id = PEER_CONFIG.get("local_msp_id") - ca_list = PEER_CONFIG.get("ca_list", []) - - initial_container_work_dir = "/work-dir" - # TODO: find a policy for peer directory definition - initial_container_environments = [ - { - "name": "FABRIC_CA_CLIENT_HOME", - "value": "%s/hyperledger/org1/peer1" - % initial_container_work_dir, - }, - {"name": "PEER_NAME", "value": name}, - ] - for ca_node in ca_list: - ca_address = ca_node.get("address") - ca_certificate_url = ca_node.get("certificate") - ca_certificate_file_name = ca_certificate_url.split("/")[-1] - ca_certificate_file_type = ( - "archive" - if ca_certificate_file_name.endswith((".tgz", "tar.gz")) - else "file" - ) - ca_type = ca_node.get("type").upper() - users = ca_node.get("users", []) - ca_environments = [ - {"name": "%s_CA_ADDRESS" % ca_type, "value": ca_address}, - { - "name": "%s_CA_CERTIFICATE_URL" % ca_type, - "value": ca_certificate_url, - }, - { - "name": "%s_CA_CERTIFICATE_FILE_NAME" % ca_type, - "value": ca_certificate_file_name, - }, - { - "name": "%s_CA_CERTIFICATE_FILE_TYPE" % ca_type, - "value": ca_certificate_file_type, - }, - ] - for user in users: - user_type = user.get("type").upper() - username = user.get("username") - password = user.get("password") - ca_environments += [ - { - "name": "%s_%s_USER_NAME" % (ca_type, user_type), - "value": username, - }, - { - "name": "%s_%s_USER_PASSWORD" % (ca_type, user_type), - "value": password, - }, - ] - initial_container_environments = ( - initial_container_environments + ca_environments - ) - - initial_container_command = ["bash", "-c"] - script_file_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "peer_initial_ca.sh" - ) - # read 
script for peer initial - with open(script_file_path, "r") as initial_ca_script: - shell_script = initial_ca_script.read() - initial_container_command_args = [shell_script] - ca_image = "%s:%s" % (FabricImages.Ca.value, self._version) - - self._initial_containers = [ - { - "image": ca_image, - "environments": initial_container_environments, - "name": "initial-ca", - "command": initial_container_command, - "command_args": initial_container_command_args, - "volume_mounts": [{"name": "workdir", "path": "/work-dir"}], - } - ] - self._volumes = [ - {"name": "workdir", "empty_dir": {}}, - {"name": "docker-run", "host_path": "/var/run"}, - ] - self._container_environments = [ - {"name": "CORE_PEER_ID", "value": name}, - {"name": "CORE_PEER_LOCALMSPID", "value": local_msp_id}, - { - "name": "CORE_PEER_MSPCONFIGPATH", - "value": "/work-dir/hyperledger/org1/peer1/msp", - }, - { - "name": "CORE_VM_ENDPOINT", - "value": "unix:///host/var/run/docker.sock", - }, - { - "name": "CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE", - "value": "guide_fabric-ca", - }, - {"name": "FABRIC_LOGGING_SPEC", "value": "debug"}, - {"name": "CORE_PEER_TLS_ENABLED", "value": "true"}, - { - "name": "CORE_PEER_TLS_CERT_FILE", - "value": "/work-dir/hyperledger/org1/peer1/tls-msp/signcerts/cert.pem", - }, - { - "name": "CORE_PEER_TLS_KEY_FILE", - "value": "/work-dir/hyperledger/org1/peer1/tls-msp/keystore/key.pem", - }, - { - "name": "CORE_PEER_GOSSIP_USELEADERELECTION", - "value": "true" if gossip_use_leader_reflection else "false", - }, - { - "name": "CORE_PEER_GOSSIP_ORGLEADER", - "value": "true" if gossip_org_leader else "false", - }, - { - "name": "CORE_PEER_GOSSIP_SKIPHANDSHAKE", - "value": "true" if gossip_skip_handshake else "false", - }, - { - "name": "CORE_PEER_TLS_ROOTCERT_FILE", - "value": "/work-dir/hyperledger/org1/peer1/tls-msp/tlscacerts/tls.pem", - }, - ] - self._container_volume_mounts = [ - {"name": "workdir", "path": "/work-dir"}, - {"name": "docker-run", "path": "/host/var/run"}, - ] - - # 
def _generate_ingress(self): - # ingress_name = "ingress-%s" % str(self._node_id) - # annotations = {"nginx.ingress.kubernetes.io/ssl-redirect": "false"} - # if self._type == FabricNodeType.Ca.name.lower(): - # ingress_paths = [ - # {"port": 7054, "path": "/%s" % str(self._node_id)} - # ] - # else: - # ingress_paths = [] - # - # return { - # "name": ingress_name, - # "service_name": self._service_name, - # "ingress_paths": ingress_paths, - # "annotations": annotations, - # } - - def add_environments(self, environments=None): - if environments is None: - environments = [] - - self._container_environments += environments - - def deployment(self): - deployment = {"name": self._deploy_name} - if self._volumes is not None: - deployment.update({"volumes": self._volumes}) - if self._initial_containers is not None: - deployment.update({"initial_containers": self._initial_containers}) - container_dict = { - "image": self._image_name, - "name": self._pod_name, - "ports": self._container_ports, - } - if self._container_environments is not None: - container_dict.update( - {"environments": self._container_environments} - ) - if self._container_volume_mounts is not None: - container_dict.update( - {"volume_mounts": self._container_volume_mounts} - ) - if self._container_command is not None: - container_dict.update({"command": self._container_command}) - if self._container_command_args is not None: - container_dict.update( - {"command_args": self._container_command_args} - ) - containers = [container_dict] - deployment.update({"containers": containers}) - - return deployment - - def service(self): - return { - "name": self._service_name, - "ports": self._service_ports, - "selector": {"app": self._deploy_name}, - "service_type": "NodePort", - } diff --git a/src/agent/kubernetes-agent/src/network/fabric/peer_initial_ca.sh b/src/agent/kubernetes-agent/src/network/fabric/peer_initial_ca.sh deleted file mode 100644 index 314760420..000000000 --- 
a/src/agent/kubernetes-agent/src/network/fabric/peer_initial_ca.sh +++ /dev/null @@ -1,22 +0,0 @@ -cd /work-dir; -mkdir tls; -wget -c $TLS_CA_CERTIFICATE_URL; -if [ "$TLS_CA_CERTIFICATE_FILE_TYPE" = "archive" ]; then tar -zxf $TLS_CA_CERTIFICATE_FILE_NAME -C tls --strip-components 1; else cp $TLS_CA_CERTIFICATE_FILE_NAME tls/ca-cert.pem; fi; -rm -rf $$TLS_CA_CERTIFICATE_FILE_NAME; -mkdir signature; -wget -c $SIGNATURE_CA_CERTIFICATE_URL; -if [ "$SIGNATURE_CA_CERTIFICATE_FILE_TYPE" = "archive" ]; then tar -zxf $SIGNATURE_CA_CERTIFICATE_FILE_NAME -C signature --strip-components 1; else cp $SIGNATURE_CA_CERTIFICATE_FILE_NAME signature/ca-cert.pem; fi; -rm -rf $SIGNATURE_CA_CERTIFICATE_FILE_NAME; -export FABRIC_CA_CLIENT_TLS_CERTFILES=/work-dir/signature/ca-cert.pem; -fabric-ca-client enroll -d -u https://$SIGNATURE_PEER_USER_NAME:$SIGNATURE_PEER_USER_PASSWORD@$SIGNATURE_CA_ADDRESS; -export FABRIC_CA_CLIENT_MSPDIR=tls-msp; -export FABRIC_CA_CLIENT_TLS_CERTFILES=/work-dir/tls/ca-cert.pem; -fabric-ca-client enroll -d -u https://$TLS_PEER_USER_NAME:$TLS_PEER_USER_PASSWORD@$TLS_CA_ADDRESS --enrollment.profile tls --csr.hosts $PEER_NAME; -cd /work-dir/hyperledger/org1/peer1/tls-msp/keystore && mv * key.pem; -cd /work-dir/hyperledger/org1/peer1/tls-msp/tlscacerts && mv * tls.pem; -export FABRIC_CA_CLIENT_HOME=/work-dir/hyperledger/org1/admin; -export FABRIC_CA_CLIENT_TLS_CERTFILES=/work-dir/signature/ca-cert.pem; -export FABRIC_CA_CLIENT_MSPDIR=msp; -fabric-ca-client enroll -d -u https://$SIGNATURE_USER_USER_NAME:$SIGNATURE_USER_USER_PASSWORD@$SIGNATURE_CA_ADDRESS; -mkdir -p /work-dir/hyperledger/org1/peer1/msp/admincerts; -cp /work-dir/hyperledger/org1/admin/msp/signcerts/cert.pem /work-dir/hyperledger/org1/peer1/msp/admincerts/org1-admin-cert.pem; diff --git a/src/agent/kubernetes-agent/src/operations/__init__.py b/src/agent/kubernetes-agent/src/operations/__init__.py deleted file mode 100644 index b59d71e00..000000000 --- 
a/src/agent/kubernetes-agent/src/operations/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from .create_node import create_node -from .delete_node import delete_node -from .fabric_ca_register import fabric_ca_register diff --git a/src/agent/kubernetes-agent/src/operations/create_node.py b/src/agent/kubernetes-agent/src/operations/create_node.py deleted file mode 100644 index 4cfd41928..000000000 --- a/src/agent/kubernetes-agent/src/operations/create_node.py +++ /dev/null @@ -1,161 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import json -import subprocess -import tarfile -from time import sleep - -import requests - -from network import FabricNetwork -from utils.env import ( - NODE_ID, - NODE_DETAIL_URL, - AGENT_ID, - AGENT_IP, - NETWORK_VERSION, - NODE_TYPE, - MAX_QUERY_RETRY, - headers, - NodeStatus, - NODE_UPLOAD_FILE_URL, - NETWORK_TYPE, - NetworkType, - FabricNodeType, -) -from utils import get_k8s_client - - -def _upload_ca_crypto(pod): - copy_cmd = [ - "kubectl", - "cp", - "%s/%s:/etc/hyperledger/fabric-ca-server/crypto" - % (AGENT_ID, pod.metadata.name), - "crypto", - ] - subprocess.call(copy_cmd) - crypto_tar_file = "crypto.tgz" - tf = tarfile.open(crypto_tar_file, mode="w:gz") - tf.add("crypto") - tf.close() - - files = {"file": open(crypto_tar_file, "rb")} - del headers["Content-Type"] - r = requests.post(NODE_UPLOAD_FILE_URL, headers=headers, files=files) - - -def _generate_peer_env_from_ports(ports=None): - if ports is None: - ports = [] - - environments = [] - for port in ports: - internal_port = port.get("internal") - external_port = port.get("external") - if internal_port == 7051: - environments += [ - { - "name": "CORE_PEER_ADDRESS", - "value": "%s:%s" % (AGENT_IP, external_port), - }, - { - "name": "CORE_PEER_GOSSIP_EXTERNALENDPOINT", - "value": "%s:%s" % (AGENT_IP, external_port), - }, - ] - elif internal_port == 7052: - environments += [ - { - "name": "CORE_PEER_CHAINCODEADDRESS", - "value": 
"%s:%s" % (AGENT_IP, external_port), - }, - { - "name": "CORE_PEER_CHAINCODELISTENADDRESS", - "value": "0.0.0.0:%s" % external_port, - }, - ] - - return environments - - -def _create_fabric_node(): - k8s_client = get_k8s_client() - - network = FabricNetwork( - version=NETWORK_VERSION, - node_type=NODE_TYPE, - agent_id=AGENT_ID, - node_id=NODE_ID, - ) - - service = network.service() - - deploy_name = None - ports = [] - new_environments = [] - if service: - success, service_response = k8s_client.create_service( - AGENT_ID, **service - ) - if service.get("service_type") == "NodePort" and success: - ports = service_response.spec.ports - ports = [ - {"external": port.node_port, "internal": port.port} - for port in ports - ] - if NODE_TYPE == FabricNodeType.Peer.value: - new_environments = _generate_peer_env_from_ports(ports) - - # add new environments depend on service result - if len(new_environments) > 0: - network.add_environments(new_environments) - - deployment = network.deployment() - for key, value in deployment.items(): - print(key, value) - if deployment: - k8s_client.create_deployment(AGENT_ID, **deployment) - deploy_name = deployment.get("name") - # if service: - # success, service_response = k8s_client.create_service( - # AGENT_ID, **service - # ) - # if service.get("service_type") == "NodePort" and success: - # ports = service_response.spec.ports - # ports = [ - # {"external": port.node_port, "internal": port.port} - # for port in ports - # ] - # if ingress: - # k8s_client.create_ingress(AGENT_ID, **ingress) - # - # The pod of node deployed in kubernetes - pod = None - # Query pod status if is Running - node_status = NodeStatus.Error.value - for i in range(1, MAX_QUERY_RETRY): - pod = k8s_client.get_pod(AGENT_ID, deploy_name) - if pod and pod.status.phase == "Running": - node_status = NodeStatus.Running.value - break - sleep(5) - - # Update node status - ret = requests.put( - url=NODE_DETAIL_URL, - headers=headers, - data=json.dumps({"status": node_status, 
"ports": ports}), - ) - - if node_status == NodeStatus.Running.value: - # if deploy success and node type is ca, - # will upload the crypto files to api engine - if NODE_TYPE == "ca": - _upload_ca_crypto(pod) - - -def create_node(): - if NETWORK_TYPE == NetworkType.Fabric.value: - _create_fabric_node() diff --git a/src/agent/kubernetes-agent/src/operations/delete_node.py b/src/agent/kubernetes-agent/src/operations/delete_node.py deleted file mode 100644 index 4f4779bcb..000000000 --- a/src/agent/kubernetes-agent/src/operations/delete_node.py +++ /dev/null @@ -1,65 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import json -from time import sleep - -import requests - -from network import FabricNetwork -from utils.env import ( - AGENT_ID, - NODE_TYPE, - NETWORK_VERSION, - NODE_ID, - NETWORK_TYPE, - NetworkType, - NODE_DETAIL_URL, - headers, - MAX_QUERY_RETRY, -) -from utils import get_k8s_client - - -def _delete_fabric_node(): - k8s_client = get_k8s_client() - - network = FabricNetwork( - version=NETWORK_VERSION, - node_type=NODE_TYPE, - agent_id=AGENT_ID, - node_id=NODE_ID, - ) - deployment = network.deployment() - service = network.service() - # config = network.generate_config() - # - # deployment = config.get("deployment") - # service = config.get("service") - # ingress = config.get("ingress") - - deploy_name = None - if service: - k8s_client.delete_service(namespace=AGENT_ID, name=service.get("name")) - if deployment: - k8s_client.delete_deployment( - namespace=AGENT_ID, name=deployment.get("name") - ) - deploy_name = deployment.get("name") - - for i in range(1, MAX_QUERY_RETRY): - pod = k8s_client.get_pod(AGENT_ID, deploy_name) - if pod is None: - requests.put( - url=NODE_DETAIL_URL, - headers=headers, - data=json.dumps({"status": "deleted"}), - ) - requests.delete(url=NODE_DETAIL_URL, headers=headers) - break - sleep(5) - - -def delete_node(): - if NETWORK_TYPE == NetworkType.Fabric.value: - _delete_fabric_node() diff --git 
a/src/agent/kubernetes-agent/src/operations/fabric_ca_register.py b/src/agent/kubernetes-agent/src/operations/fabric_ca_register.py deleted file mode 100644 index dc10eee6e..000000000 --- a/src/agent/kubernetes-agent/src/operations/fabric_ca_register.py +++ /dev/null @@ -1,82 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -from time import sleep -import os -from uuid import uuid4 -from utils.env import ( - NODE_FILE_URL, - FABRIC_CA_USER, - SERVICE_PORTS, - CA_ADMIN_NAME, - CA_ADMIN_PASSWORD, - AGENT_IP, - USER_PATCH_URL, - FabricImages, - TOKEN, - AGENT_ID, - NETWORK_VERSION, - MAX_QUERY_RETRY, -) -from utils import get_k8s_client - - -def fabric_ca_register(): - if NODE_FILE_URL: - ca_service_port = SERVICE_PORTS.get("7054") - pod_environments = [ - {"name": "NODE_FILE_URL", "value": NODE_FILE_URL}, - {"name": "CA_ADMIN_NAME", "value": CA_ADMIN_NAME}, - {"name": "CA_ADMIN_PASSWORD", "value": CA_ADMIN_PASSWORD}, - {"name": "CA_USER_NAME", "value": FABRIC_CA_USER.get("name")}, - { - "name": "CA_USER_PASSWORD", - "value": FABRIC_CA_USER.get("secret"), - }, - {"name": "CA_USER_TYPE", "value": FABRIC_CA_USER.get("type")}, - { - "name": "CA_USER_ATTRS", - "value": FABRIC_CA_USER.get("attrs", ""), - }, - {"name": "TOKEN", "value": TOKEN}, - {"name": "USER_PATCH_URL", "value": USER_PATCH_URL}, - { - "name": "FABRIC_CA_CLIENT_TLS_CERTFILES", - "value": "/tmp/crypto/ca-cert.pem", - }, - {"name": "FABRIC_CA_CLIENT_HOME", "value": "/tmp/admin"}, - { - "name": "CA_SERVER", - "value": "%s:%s" % (AGENT_IP, ca_service_port), - }, - ] - pod_command = ["bash", "-c"] - script_file_path = os.path.join( - os.path.dirname(os.path.abspath(__file__)), "fabric_ca_register.sh" - ) - with open(script_file_path, "r") as initial_ca_script: - shell_script = initial_ca_script.read() - pod_command_args = [shell_script] - job_name = "register-ca-%s" % uuid4().hex - template = { - "name": job_name, - "containers": [ - { - "name": "register", - "image": "%s:%s" - % (FabricImages.Ca.value, 
NETWORK_VERSION), - "command": pod_command, - "command_args": pod_command_args, - "environments": pod_environments, - } - ], - } - client = get_k8s_client() - client.create_job(AGENT_ID, **template) - - for i in range(1, MAX_QUERY_RETRY): - pod = client.get_pod(AGENT_ID, job_name) - if pod.status.phase == "Succeeded": - client.delete_job(AGENT_ID, job_name) - break - sleep(5) diff --git a/src/agent/kubernetes-agent/src/operations/fabric_ca_register.sh b/src/agent/kubernetes-agent/src/operations/fabric_ca_register.sh deleted file mode 100755 index 1d1cb1c34..000000000 --- a/src/agent/kubernetes-agent/src/operations/fabric_ca_register.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2164 -cd /tmp; -wget "${NODE_FILE_URL}"; -# shellcheck disable=SC2034 -NODE_FILE="${NODE_FILE_URL##*/}" -tar -zxf "${NODE_FILE}" -fabric-ca-client enroll -d -u https://"${CA_ADMIN_NAME}":"${CA_ADMIN_PASSWORD}"@"${CA_SERVER}" -if [ "${CA_USER_ATTRS}" -eq "" ]; then - fabric-ca-client register -d --id.name "${CA_USER_NAME}" --id.secret "${CA_USER_PASSWORD}" --id.type "${CA_USER_TYPE}" -else - fabric-ca-client register -d --id.name "${CA_USER_NAME}" --id.secret "${CA_USER_PASSWORD}" --id.type "${CA_USER_TYPE}" --id.attrs "${CA_USER_ATTRS}" -fi -# shellcheck disable=SC2034 -# shellcheck disable=SC2181 -if [ $? 
-eq 0 ]; then - # shellcheck disable=SC2034 - user_status="registered" -else - # shellcheck disable=SC2034 - user_status="fail" -fi -wget --method=PATCH --body-data "{\"status\": \"${user_status}\"}" --header "Authorization: JWT ${TOKEN}" --header "Content-Type: application/json" "${USER_PATCH_URL}" diff --git a/src/agent/kubernetes-agent/src/utils/__init__.py b/src/agent/kubernetes-agent/src/utils/__init__.py deleted file mode 100644 index 1f11a21ce..000000000 --- a/src/agent/kubernetes-agent/src/utils/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import os -import zipfile -from .env import AGENT_CONFIG_FILE, AGENT_ID -from .download import download_file -from .client import KubernetesClient - - -def prepare_config(): - config_path = "/app/.kube/config" - - if os.path.exists(config_path): - return config_path - - config_file = download_file(AGENT_CONFIG_FILE, "/tmp") - ext = os.path.splitext(config_file)[-1].lower() - - if ext == ".zip": - with zipfile.ZipFile(config_file, "r") as zip_ref: - zip_ref.extractall("/app") - - return config_path - - -def get_k8s_client(): - k8s_config = prepare_config() - - k8s_client = KubernetesClient(config_file=k8s_config) - k8s_client.get_or_create_namespace(name=AGENT_ID) - - return k8s_client diff --git a/src/agent/kubernetes-agent/src/utils/client.py b/src/agent/kubernetes-agent/src/utils/client.py deleted file mode 100644 index 734534676..000000000 --- a/src/agent/kubernetes-agent/src/utils/client.py +++ /dev/null @@ -1,373 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import logging - -from kubernetes import client, config -from kubernetes.client.rest import ApiException - -LOG = logging.getLogger(__name__) - - -class KubernetesClient(object): - def __init__(self, config_file=None): - super(KubernetesClient, self).__init__() - self._config_file = config_file - config.load_kube_config(config_file) - - self._major, self._minor = self._get_version_code() - - def list_pods(self): 
- v1 = client.CoreV1Api() - print("Listing pods with their IPs:") - ret = v1.list_pod_for_all_namespaces(watch=False) - for i in ret.items: - print( - "%s\t%s\t%s" - % (i.status.pod_ip, i.metadata.namespace, i.metadata.name) - ) - - def _get_version_code(self): - version_api = client.VersionApi() - major = 1 - minor = 16 - - try: - api_response = version_api.get_code() - major = int(api_response.major) - minor = int(api_response.minor) - except ApiException as e: - LOG.error("Exception when calling VersionApi->get_code: %s", e) - - return major, minor - - def get_pod(self, namespace=None, deploy_name=None): - v1 = client.CoreV1Api() - pod = None - try: - api_response = v1.list_namespaced_pod( - namespace, label_selector="app=%s" % deploy_name - ) - except ApiException as e: - LOG.error( - "Exception when calling CoreV1Api->list_namespaced_pod: %s", e - ) - else: - for item in api_response.items: - pod_name = item.metadata.name - pod = item - if pod_name.startswith(deploy_name): - break - - return pod - - def get_or_create_namespace(self, name=None): - if name: - v1 = client.CoreV1Api() - try: - v1.read_namespace(name=name) - except ApiException: - body = client.V1Namespace( - kind="Namespace", - api_version="v1", - metadata=client.V1ObjectMeta(name=name), - ) - try: - v1.create_namespace(body=body) - except ApiException as e: - LOG.error( - "Exception when calling CoreV1Api->read_namespace: %s", - e, - ) - - def _generate_container_pods(self, containers=None): - if containers is None or len(containers) == 0: - return None - - container_pods = [] - for container in containers: - ports = container.get("ports", []) - environments = container.get("environments", []) - command = container.get("command", []) - command_args = container.get("command_args", []) - volume_mounts = container.get("volume_mounts") - volume_mounts = ( - [ - client.V1VolumeMount( - mount_path=volume_mount.get("path"), - name=volume_mount.get("name"), - ) - for volume_mount in volume_mounts - ] - 
if volume_mounts - else None - ) - - environments = [ - client.V1EnvVar(name=env.get("name"), value=env.get("value")) - for env in environments - ] - ports = [ - client.V1ContainerPort(container_port=port) for port in ports - ] - container_parameter = { - "name": container.get("name"), - "image": container.get("image"), - "image_pull_policy": "IfNotPresent", - } - if environments is not None and len(environments) > 0: - container_parameter.update({"env": environments}) - if command is not None and len(command) > 0: - container_parameter.update({"command": command}) - if command_args is not None and len(command_args) > 0: - container_parameter.update({"args": command_args}) - if ports is not None and len(ports) > 0: - container_parameter.update({"ports": ports}) - if volume_mounts is not None and len(volume_mounts) > 0: - container_parameter.update({"volume_mounts": volume_mounts}) - container_pods.append(client.V1Container(**container_parameter)) - - return container_pods - - def _generate_pod_template(self, *args, **kwargs): - containers = kwargs.get("containers", []) - initial_containers = kwargs.get("initial_containers", []) - volumes_json = kwargs.get("volumes", []) - deploy_name = kwargs.get("name") - labels = kwargs.get("labels", {}) - labels.update({"app": deploy_name}) - restart_policy = kwargs.get("restart_policy", "Always") - volumes = [] - for volume in volumes_json: - volume_name = volume.get("name") - host_path = volume.get("host_path", None) - empty_dir = volume.get("empty_dir", None) - parameters = {} - if host_path: - host_path = client.V1HostPathVolumeSource(path=host_path) - parameters.update({"host_path": host_path}) - if empty_dir: - empty_dir = client.V1EmptyDirVolumeSource(**empty_dir) - parameters.update({"empty_dir": empty_dir}) - persistent_volume_claim = volume.get("pvc", None) - if persistent_volume_claim: - persistent_volume_claim = ( - client.V1PersistentVolumeClaimVolumeSource( - claim_name=persistent_volume_claim - ) - ) - 
parameters.update( - {"persistent_volume_claim": persistent_volume_claim} - ) - volumes.append(client.V1Volume(name=volume_name, **parameters)) - initial_container_pods = self._generate_container_pods( - initial_containers - ) - container_pods = self._generate_container_pods(containers) - pod_spec = client.V1PodSpec( - init_containers=initial_container_pods, - containers=container_pods, - volumes=volumes, - restart_policy=restart_policy, - ) - spec_metadata = client.V1ObjectMeta(labels=labels) - template_spec = client.V1PodTemplateSpec( - metadata=spec_metadata, spec=pod_spec - ) - - LOG.info("template spec %s", template_spec) - - return template_spec - - def create_deployment(self, namespace=None, *args, **kwargs): - deploy_name = kwargs.get("name") - deployment_metadata = client.V1ObjectMeta(name=deploy_name) - template_spec = self._generate_pod_template(*args, **kwargs) - body = client.V1Deployment( - api_version="apps/v1", - kind="Deployment", - metadata=deployment_metadata, - spec=client.V1DeploymentSpec( - selector=client.V1LabelSelector( - match_labels={ - "app": kwargs.get("name"), - } - ), - template=template_spec, - ), - ) - - api_instance = client.AppsV1Api() - - try: - api_instance.create_namespaced_deployment( - namespace=namespace, body=body, pretty="true" - ) - except ApiException as e: - LOG.error("Exception when call AppsV1beta1Api: %s", e) - raise e - - return True - - def create_job(self, namespace=None, *args, **kwargs): - job_name = kwargs.get("name") - job_metadata = client.V1ObjectMeta(name=job_name) - template_spec = self._generate_pod_template( - *args, **kwargs, restart_policy="Never" - ) - body = client.V1Job( - api_version="batch/v1", - kind="Job", - metadata=job_metadata, - spec=client.V1JobSpec(template=template_spec), - ) - api_instance = client.BatchV1Api() - - try: - api_instance.create_namespaced_job( - namespace=namespace, body=body, pretty="true" - ) - except ApiException as e: - LOG.error("Exception when call AppsV1beta1Api: 
%s", e) - raise e - - return True - - def create_service( - self, - namespace=None, - name=None, - selector=None, - ports=None, - service_type="ClusterIP", - ): - if selector is None: - selector = {} - if ports is None: - ports = [] - - metadata = client.V1ObjectMeta(name=name, labels={"app": name}) - ports = [ - client.V1ServicePort(port=port.get("port"), name=port.get("name")) - for port in ports - ] - spec = client.V1ServiceSpec( - ports=ports, selector=selector, type=service_type - ) - body = client.V1Service( - metadata=metadata, spec=spec, kind="Service", api_version="v1" - ) - - api_instance = client.CoreV1Api() - try: - response = api_instance.create_namespaced_service(namespace, body) - except ApiException as e: - LOG.error("Exception when call CoreV1Api: %s", e) - raise e - - return True, response - - def create_ingress( - self, - namespace=None, - name=None, - service_name=None, - ingress_paths=None, - annotations=None, - ): - if ingress_paths is None: - ingress_paths = [] - if annotations is None: - annotations = {} - - api_instance = client.ExtensionsV1beta1Api() - metadata = client.V1ObjectMeta(name=name, annotations=annotations) - path_list = [] - for ing_path in ingress_paths: - ing_backend = client.V1beta1IngressBackend( - service_name=service_name, service_port=ing_path.get("port", 0) - ) - path_list.append( - client.V1beta1HTTPIngressPath( - path=ing_path.get("path", ""), backend=ing_backend - ) - ) - http_dict = client.V1beta1HTTPIngressRuleValue(paths=path_list) - rule_list = [client.V1beta1IngressRule(http=http_dict, host="")] - ingress_spec = client.V1beta1IngressSpec(rules=rule_list) - body = client.V1beta1Ingress( - api_version="extensions/v1beta1", - metadata=metadata, - spec=ingress_spec, - kind="Ingress", - ) - - try: - api_instance.create_namespaced_ingress( - namespace=namespace, body=body, pretty="true" - ) - except ApiException as e: - LOG.error("Create ingress failed %s", e) - raise e - - return True - - def delete_job(self, 
namespace=None, name=None): - api_instance = client.BatchV1Api() - delete_options = client.V1DeleteOptions( - propagation_policy="Foreground" - ) - grace_period_seconds = 10 - - try: - api_instance.delete_namespaced_job( - name=name, - namespace=namespace, - body=delete_options, - grace_period_seconds=grace_period_seconds, - pretty="true", - ) - except ApiException as e: - LOG.error("Exception when call AppsV1beta1Api: %s", e) - - def delete_deployment(self, namespace=None, name=None): - api_instance = client.AppsV1Api() - delete_options = client.V1DeleteOptions( - propagation_policy="Foreground" - ) - grace_period_seconds = 10 - - try: - api_instance.delete_namespaced_deployment( - name=name, - namespace=namespace, - body=delete_options, - grace_period_seconds=grace_period_seconds, - pretty="true", - ) - except ApiException as e: - LOG.error("Exception when call AppsV1beta1Api: %s", e) - - def delete_service(self, namespace=None, name=None): - api_instance = client.CoreV1Api() - - try: - api_instance.delete_namespaced_service( - name=name, namespace=namespace - ) - except ApiException as e: - LOG.error("Exception when call CoreV1Api: %s", e) - - def delete_ingress(self, namespace=None, name=None): - api_instance = client.ExtensionsV1beta1Api() - delete_options = client.V1DeleteOptions() - - try: - api_instance.delete_namespaced_ingress( - name=name, - namespace=namespace, - body=delete_options, - pretty="true", - ) - except ApiException as e: - LOG.error("Exception when call AppsV1beta1Api: %s\n" % e) diff --git a/src/agent/kubernetes-agent/src/utils/download.py b/src/agent/kubernetes-agent/src/utils/download.py deleted file mode 100644 index 2d5a2ebbe..000000000 --- a/src/agent/kubernetes-agent/src/utils/download.py +++ /dev/null @@ -1,22 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import requests -import mimetypes -import os -from uuid import uuid4 - - -def download_file(url, target_dir): - r = requests.get(url, allow_redirects=True) - content_type = 
r.headers["content-type"] - extension = mimetypes.guess_extension(content_type) - file_name = "%s%s" % (uuid4().hex, extension) - target_file = os.path.join(target_dir, file_name) - - if not os.path.exists(target_dir): - os.makedirs(target_dir) - - open(target_file, "wb").write(r.content) - - return target_file diff --git a/src/agent/kubernetes-agent/src/utils/env.py b/src/agent/kubernetes-agent/src/utils/env.py deleted file mode 100644 index 5a0d7f2ed..000000000 --- a/src/agent/kubernetes-agent/src/utils/env.py +++ /dev/null @@ -1,83 +0,0 @@ -# -# SPDX-License-Identifier: Apache-2.0 -# -import os -import json -from enum import Enum, unique - -# deploy name in kubernetes -DEPLOY_NAME = os.getenv("DEPLOY_NAME") -# network type to deploy, support fabric -NETWORK_TYPE = os.getenv("NETWORK_TYPE") -# network version, for fabric support 1.4 -NETWORK_VERSION = os.getenv("NETWORK_VERSION") -# node type -# fabric: ca, peer, orderer -NODE_TYPE = os.getenv("NODE_TYPE") -# configuration file for kubernetes agent -AGENT_CONFIG_FILE = os.getenv("AGENT_CONFIG_FILE") -AGENT_ID = os.getenv("AGENT_ID") -AGENT_IP = os.getenv("AGENT_IP") -NODE_ID = os.getenv("NODE_ID") -OPERATION = os.getenv("OPERATION") -TOKEN = os.getenv("TOKEN") -NODE_DETAIL_URL = os.getenv("NODE_DETAIL_URL") -NODE_FILE_URL = os.getenv("NODE_FILE_URL") -NODE_SERVICE_PORT = os.getenv("NODE_SERVICE_PORT") -NODE_UPLOAD_FILE_URL = os.getenv("NODE_UPLOAD_FILE_URL") -MAX_QUERY_RETRY = 30 -FABRIC_CA_USER = json.loads(os.getenv("FABRIC_CA_USER", "{}")) -SERVICE_PORTS = json.loads(os.getenv("SERVICE_PORTS", "{}")) -USER_PATCH_URL = os.getenv("USER_PATCH_URL") - -CA_CONFIG = json.loads(os.getenv("FABRIC_CA_CONFIG", "{}")) -PEER_CONFIG = json.loads(os.getenv("FABRIC_PEER_CONFIG", "{}")) -# Initial admin name/password for ca server -CA_ADMIN_NAME = CA_CONFIG.get("admin_name", "admin") -CA_ADMIN_PASSWORD = CA_CONFIG.get("admin_password", "adminpw") - -FABRIC_IMAGE_BASE_NAME = "hyperledger/fabric" - -headers = { - 
"Authorization": "JWT %s" % TOKEN, - "Content-Type": "application/json", -} - - -@unique -class AgentOperation(Enum): - Create = "create" - Start = "start" - Stop = "stop" - Query = "query" - Update = "update" - Delete = "delete" - FabricCARegister = "fabric:ca:register" - - -@unique -class NodeStatus(Enum): - Deploying = "deploying" - Running = "running" - Stopped = "stopped" - Deleting = "deleting" - Error = "error" - - -@unique -class NetworkType(Enum): - Fabric = "fabric" - - -@unique -class FabricNodeType(Enum): - Ca = "ca" - Orderer = "orderer" - Peer = "peer" - - -@unique -class FabricImages(Enum): - Ca = "%s-ca" % FABRIC_IMAGE_BASE_NAME - Peer = "%s-peer" % FABRIC_IMAGE_BASE_NAME - Orderer = "%s-orderer" % FABRIC_IMAGE_BASE_NAME diff --git a/src/api-engine/.dockerignore b/src/api-engine/.dockerignore new file mode 100644 index 000000000..67c0ff5af --- /dev/null +++ b/src/api-engine/.dockerignore @@ -0,0 +1,2 @@ +cello +!cello/node diff --git a/src/api-engine/Dockerfile.in b/src/api-engine/Dockerfile similarity index 69% rename from src/api-engine/Dockerfile.in rename to src/api-engine/Dockerfile index 76ab1d597..337458a12 100644 --- a/src/api-engine/Dockerfile.in +++ b/src/api-engine/Dockerfile @@ -1,17 +1,15 @@ FROM python:3.8 # Install software -RUN apt-get update \ - && apt-get install -y gettext-base graphviz libgraphviz-dev vim \ - && apt-get autoclean \ - && apt-get clean \ - && apt-get autoremove && rm -rf /var/cache/apt/ - -# Set the working dir -WORKDIR /var/www/server +RUN apt-get update\ + && apt-get install -y gettext-base graphviz libgraphviz-dev vim\ + && apt-get autoclean\ + && apt-get clean\ + && apt-get autoremove\ + && rm -rf /var/cache/apt/ # Copy source code to the working dir -COPY src/api-engine ./ +COPY . . # Install compiled code tools from Artifactory and copy it to opt folder. 
RUN curl -L --retry 5 --retry-delay 3 "https://github.com/hyperledger/fabric/releases/download/v2.5.13/hyperledger-fabric-linux-amd64-2.5.13.tar.gz" | tar xz -C ./cello/ @@ -19,6 +17,4 @@ RUN curl -L --retry 5 --retry-delay 3 "https://github.com/hyperledger/fabric/rel # Install python dependencies RUN pip3 install -r requirements.txt -ENV RUN_MODE=server - CMD ["bash", "entrypoint.sh"] diff --git a/src/api-engine/chaincode/serializers.py b/src/api-engine/chaincode/serializers.py index 07ce6a74b..ea2edd910 100644 --- a/src/api-engine/chaincode/serializers.py +++ b/src/api-engine/chaincode/serializers.py @@ -1,4 +1,5 @@ import tarfile +from typing import List from django.core.validators import MinValueValidator from rest_framework import serializers @@ -92,6 +93,18 @@ def validate_channel(self, value: Channel): raise serializers.ValidationError("You can only install chaincodes on your organization.") return value + def validate_peers(self, value: List[Node]): + for node in value: + if Node.Type.PEER != node.type: + raise serializers.ValidationError( + "Node {} is not a peer but a/an {} instead.".format(node.id, node.type) + ) + if node.organization != self.context["organization"]: + raise serializers.ValidationError( + "Node {} does not belong to your organization.".format(node.id) + ) + return value + def create(self, validated_data) -> ChaincodeID: validated_data["user"] = self.context["user"] validated_data["organization"] = self.context["organization"] diff --git a/src/api-engine/chaincode/service.py b/src/api-engine/chaincode/service.py index 649a9bea1..050cb64f1 100644 --- a/src/api-engine/chaincode/service.py +++ b/src/api-engine/chaincode/service.py @@ -113,7 +113,7 @@ def create_chaincode( orderer_domain_name = get_domain_name( organization.name, Node.Type.ORDERER, - Node.objects.filter(organization=organization).first().name + Node.objects.filter(type=Node.Type.ORDERER, organization=organization).first().name ) command = [ peer_command, diff --git 
a/src/api-engine/channel/views.py b/src/api-engine/channel/views.py index 8270cf6cf..4a03c38d7 100644 --- a/src/api-engine/channel/views.py +++ b/src/api-engine/channel/views.py @@ -31,10 +31,10 @@ def list(self, request): p = serializer.get_paginator(Channel.objects.filter(organizations__id__contains=request.user.organization.id)) return Response( status=status.HTTP_200_OK, - data=ChannelList({ + data=ok(ChannelList({ "total": p.count, "data": ChannelResponse(p.page(serializer.data["page"]).object_list, many=True).data, - }).data, + }).data), ) @swagger_auto_schema( @@ -49,4 +49,5 @@ def create(self, request): serializer.is_valid(raise_exception=True) return Response( status=status.HTTP_201_CREATED, - data=serializer.save().data) + data=ok(serializer.save().data) + ) diff --git a/src/api-engine/entrypoint.sh b/src/api-engine/entrypoint.sh index 85f68ba36..85762d50f 100755 --- a/src/api-engine/entrypoint.sh +++ b/src/api-engine/entrypoint.sh @@ -1,14 +1,9 @@ -#!/usr/bin/env bash +#!/usr/bin/bash holdup -t 120 tcp://${DB_HOST:-localhost}:${DB_PORT:-5432}; python manage.py migrate; -python manage.py create_user \ - --username ${API_ENGINE_ADMIN_EMAIL:-admin@cello.com} \ - --password ${API_ENGINE_ADMIN_PASSWORD:-pass} \ - --email ${API_ENGINE_ADMIN_EMAIL:-admin@cello.com} \ - --is_superuser \ - --role admin -if [[ "${DEBUG:-True,,}" == "true" ]]; then # For dev, use pure Django directly +DEBUG="${DEBUG:-True}" +if [[ "${DEBUG,,}" == "true" ]]; then # For dev, use pure Django directly python manage.py runserver 0.0.0.0:8080; else # For production, use uwsgi in front uwsgi --ini server.ini; diff --git a/src/api-engine/node/views.py b/src/api-engine/node/views.py index f14167fab..faf59fa3b 100644 --- a/src/api-engine/node/views.py +++ b/src/api-engine/node/views.py @@ -26,13 +26,12 @@ class NodeViewSet(viewsets.ViewSet): def list(self, request): serializer = PageQuerySerializer(data=request.GET) p = 
serializer.get_paginator(Node.objects.filter(organization=request.user.organization)) - response = NodeList({ - "total": p.count, - "data": NodeResponse(p.page(serializer.data['page']).object_list, many=True).data - }) return Response( status=status.HTTP_200_OK, - data=ok(response.data), + data=ok(NodeList({ + "total": p.count, + "data": NodeResponse(p.page(serializer.data['page']).object_list, many=True).data + }).data), ) @swagger_auto_schema( @@ -45,9 +44,8 @@ def list(self, request): def create(self, request): serializer = NodeCreateBody(data=request.data, context={"organization": request.user.organization}) serializer.is_valid(raise_exception=True) - response = NodeID(serializer.save().__dict__) return Response( status=status.HTTP_201_CREATED, - data=ok(response.data), + data=ok(NodeID(serializer.save().__dict__).data), ) diff --git a/src/api-engine/requirements.txt b/src/api-engine/requirements.txt index 08b44d1d0..ea890607d 100644 --- a/src/api-engine/requirements.txt +++ b/src/api-engine/requirements.txt @@ -10,7 +10,7 @@ django-filter==2.4.0 django-rest-auth==0.9.5 djangorestframework==3.15.2 djangorestframework-simplejwt==5.3.1 -docker==4.2.0 +docker==7.1.0 drf-yasg==1.21.7 flex==6.14.1 fqdn==1.5.1 diff --git a/src/dashboard/Dockerfile b/src/dashboard/Dockerfile new file mode 100644 index 000000000..b30b6acd9 --- /dev/null +++ b/src/dashboard/Dockerfile @@ -0,0 +1,14 @@ +FROM node:20.15 + +WORKDIR /usr/src/app/ +USER root +COPY . . 
+RUN export NODE_OPTIONS=--openssl-legacy-provider && yarn --network-timeout 600000 && yarn run build + +FROM nginx:1.15.12 +COPY --from=0 /usr/src/app/dist /usr/share/nginx/html +COPY cello.conf /etc/nginx/conf.d/ + +EXPOSE 8081 + +CMD ["bash", "-c", "nginx -g 'daemon off;'"] diff --git a/src/dashboard/cello.conf b/src/dashboard/cello.conf new file mode 100644 index 000000000..7e9585df7 --- /dev/null +++ b/src/dashboard/cello.conf @@ -0,0 +1,32 @@ +server { + listen 8081; + server_name localhost; + + gzip on; + gzip_min_length 1k; + gzip_comp_level 9; + gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript application/x-httpd-php image/jpeg image/gif image/png; + gzip_vary on; + gzip_disable "MSIE [1-6]\."; + + root /usr/share/nginx/html; + index index.html index.html; + + client_max_body_size 50M; + + location / { + try_files $uri /index.html; + } + + location /api { + proxy_pass http://cello-api-engine:8080; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + + location ~ /\. 
{ + deny all; + } +} diff --git a/src/dashboard/src/models/chaincode.js b/src/dashboard/src/models/chaincode.js index 0f8f00818..2e8874b8b 100644 --- a/src/dashboard/src/models/chaincode.js +++ b/src/dashboard/src/models/chaincode.js @@ -1,5 +1,6 @@ import { listChainCode, + createChainCode, uploadChainCode, installChainCode, approveChainCode, @@ -36,6 +37,12 @@ export default { }, }); }, + *createChainCode({ payload, callback }, { call }) { + const response = yield call(createChainCode, payload); + if (callback) { + callback(response); + } + }, *uploadChainCode({ payload, callback }, { call }) { const response = yield call(uploadChainCode, payload); if (callback) { diff --git a/src/dashboard/src/models/channel.js b/src/dashboard/src/models/channel.js index a90fd46dc..621452e0c 100644 --- a/src/dashboard/src/models/channel.js +++ b/src/dashboard/src/models/channel.js @@ -27,7 +27,7 @@ export default { type: 'save', payload: { pagination, - channels: response.data, + channels: response.data.data, }, }); }, diff --git a/src/dashboard/src/models/login.js b/src/dashboard/src/models/login.js index dfe8d9749..c15e7d4bf 100644 --- a/src/dashboard/src/models/login.js +++ b/src/dashboard/src/models/login.js @@ -52,11 +52,12 @@ export default { *register({ payload }, { call, put }) { const response = yield call(register, payload); + const isSuccessful = response.status.toLowerCase() === 'successful'; yield put({ type: 'changeRegisterStatus', payload: { - success: response.status === 'successful', - msg: response.status === 'successful' ? 'Register successfully!' : response.msg, + success: isSuccessful, + msg: isSuccessful ? 'Register successfully!' 
: response.msg, }, }); }, diff --git a/src/dashboard/src/pages/ChainCode/ChainCode.js b/src/dashboard/src/pages/ChainCode/ChainCode.js index ec1dbb0db..2e6813ca1 100644 --- a/src/dashboard/src/pages/ChainCode/ChainCode.js +++ b/src/dashboard/src/pages/ChainCode/ChainCode.js @@ -57,7 +57,7 @@ class ChainCode extends PureComponent { const { dispatch } = this.props; dispatch({ - type: 'chainCode/listNode', + type: 'node/listNode', }); }; @@ -165,14 +165,13 @@ class ChainCode extends PureComponent { const { dispatch } = this.props; const formData = new FormData(); - Object.keys(values) - .filter(key => !(key === 'description' && !values[key])) // filter out empty description - .forEach(key => { - formData.append(key, values[key]); - }); + /* convert object to FormData */ + Object.keys(values).forEach(key => { + formData.append(key, values[key]); + }); dispatch({ - type: 'chainCode/uploadChainCode', + type: 'chainCode/createChainCode', payload: formData, callback, }); diff --git a/src/dashboard/src/pages/ChainCode/forms/UploadForm.js b/src/dashboard/src/pages/ChainCode/forms/UploadForm.js index 062eae720..926bbc6c6 100644 --- a/src/dashboard/src/pages/ChainCode/forms/UploadForm.js +++ b/src/dashboard/src/pages/ChainCode/forms/UploadForm.js @@ -1,14 +1,38 @@ -import React from 'react'; +import { useState, useEffect } from 'react'; import { injectIntl, useIntl } from 'umi'; -import { Button, Modal, Input, Upload, message } from 'antd'; +import { Button, Modal, Input, Upload, message, Switch, Select, InputNumber, Tag } from 'antd'; import { UploadOutlined } from '@ant-design/icons'; import { Form } from 'antd/lib/index'; +import { listNode } from '@/services/node'; +import { listChannel } from '@/services/channel'; +import styles from '../styles.less'; const FormItem = Form.Item; +const tagRender = props => { + const { label, closable, onClose } = props; + const onPreventMouseDown = event => { + event.preventDefault(); + event.stopPropagation(); + }; + return ( + + {label} 
+ + ); +}; + const UploadForm = props => { const [form] = Form.useForm(); const intl = useIntl(); + const [nodes, setNodes] = useState(); + const [channels, setChannels] = useState(); const { modalVisible, handleUpload, @@ -19,8 +43,28 @@ const UploadForm = props => { setFile, } = props; + useEffect(() => { + async function fecthData() { + const responseNodes = await listNode(); + const responseChannels = await listChannel(); + const nodeOptions = responseNodes.data.data + .filter(node => node.type.toLowerCase() === 'peer') + .map(node => ({ + label: node.name, + value: node.id, + })); + const channelOptions = responseChannels.data.data.map(channel => ({ + label: channel.name, + value: channel.id, + })); + setNodes(nodeOptions); + setChannels(channelOptions); + } + fecthData(); + }, []); + const uploadCallback = response => { - if (response.status !== 'successful') { + if (response.status.toLowerCase() !== 'successful') { message.error( intl.formatMessage({ id: 'app.chainCode.form.create.fail', @@ -97,7 +141,7 @@ const UploadForm = props => { id: 'app.chainCode.form.create.file', defaultMessage: 'Package', })} - name="file" + name="package" getValueFromEvent={normFile} rules={[ { @@ -120,6 +164,106 @@ const UploadForm = props => { + + + + + + + + + + + + + + + + + + { const { modalVisible, handleCreate, handleModalVisible, nodes, creating, fetchChannels } = props; const createCallback = response => { - if (response.status !== 'successful') { + if (response.status.toLowerCase() !== 'successful') { message.error( intl.formatMessage({ id: 'app.channel.form.create.fail', @@ -62,7 +62,7 @@ const CreateChannel = props => { const orderers = []; Object.keys(nodes).forEach(item => { - if (nodes[item].type === 'peer') { + if (nodes[item].type.toLowerCase() === 'peer') { peers.push({ label: nodes[item].name, value: nodes[item].id }); } else { orderers.push({ label: nodes[item].name, value: nodes[item].id }); @@ -133,7 +133,7 @@ const CreateChannel = props => { id: 
'app.channel.form.create.orderer', defaultMessage: 'Please select orderer', })} - name="orderers" + name="orderer_ids" rules={[ { required: true, @@ -157,7 +157,7 @@ const CreateChannel = props => { id: 'app.channel.form.create.peer', defaultMessage: 'Peer', })} - name="peers" + name="peer_ids" rules={[ { required: true, diff --git a/src/dashboard/src/pages/Node/index.js b/src/dashboard/src/pages/Node/index.js index 8734f0d6b..70c79bc03 100644 --- a/src/dashboard/src/pages/Node/index.js +++ b/src/dashboard/src/pages/Node/index.js @@ -182,7 +182,7 @@ const CreateNode = props => { const { createModalVisible, handleCreate, handleModalVisible, creating, queryNodeList } = props; const createCallback = response => { - if (response.status !== 'successful') { + if (response.status.toLowerCase() !== 'successful') { message.error( intl.formatMessage({ id: 'app.node.new.createFail', @@ -222,7 +222,7 @@ const CreateNode = props => { }, }; - const types = ['orderer', 'peer']; + const types = ['ORDERER', 'PEER']; const typeOptions = types.map(item => (